From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.12 commit in: /
Date: Tue, 30 Sep 2014 17:16:28 +0000 (UTC)
Message-ID: <1412097234.540c21f81b78f7c07ce2518aa9d3077f14ae25d6.mpagano@gentoo>

commit:     540c21f81b78f7c07ce2518aa9d3077f14ae25d6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 30 17:13:54 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 30 17:13:54 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=540c21f8

Linux patch 3.12.29

---
 0000_README              |    4 +
 1028_linux-3.12.29.patch | 5580 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5584 insertions(+)

diff --git a/0000_README b/0000_README
index 7f93023..ae0f6aa 100644
--- a/0000_README
+++ b/0000_README
@@ -154,6 +154,10 @@ Patch:  1027_linux-3.12.28.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.12.28
 
+Patch:  1028_linux-3.12.29.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.12.29
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1028_linux-3.12.29.patch b/1028_linux-3.12.29.patch
new file mode 100644
index 0000000..45fb6d1
--- /dev/null
+++ b/1028_linux-3.12.29.patch
@@ -0,0 +1,5580 @@
+diff --git a/Makefile b/Makefile
+index 300584fe5ad4..67cec33d00c7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 12
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = One Giant Leap for Frogkind
+ 
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 7ae8a1f00c3c..7af6183daa2e 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -183,9 +183,27 @@ void exit_thread(void)
+ {
+ }
+ 
++static void tls_thread_flush(void)
++{
++	asm ("msr tpidr_el0, xzr");
++
++	if (is_compat_task()) {
++		current->thread.tp_value = 0;
++
++		/*
++		 * We need to ensure ordering between the shadow state and the
++		 * hardware state, so that we don't corrupt the hardware state
++		 * with a stale shadow state during context switch.
++		 */
++		barrier();
++		asm ("msr tpidrro_el0, xzr");
++	}
++}
++
+ void flush_thread(void)
+ {
+ 	fpsimd_flush_thread();
++	tls_thread_flush();
+ 	flush_ptrace_hw_breakpoint(current);
+ }
+ 
+diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
+index 26e9c4eeaba8..78039927c807 100644
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
+ 
+ 	case __ARM_NR_compat_set_tls:
+ 		current->thread.tp_value = regs->regs[0];
++
++		/*
++		 * Protect against register corruption from context switch.
++		 * See comment in tls_thread_flush.
++		 */
++		barrier();
+ 		asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
+ 		return 0;
+ 
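
The two arm64 hunks above apply one idea in two places: the compat shadow copy of the TLS pointer (current->thread.tp_value) and the hardware register (tpidrro_el0) must be written in a fixed order with a compiler barrier between them, so a context switch landing between the two stores never pairs fresh shadow state with a stale register value. A minimal standalone C sketch of that pattern (illustrative names; the real code writes the system register with inline asm):

    #include <stdio.h>

    /* Compiler barrier, as in the kernel's barrier() macro. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct thread_shadow {
        unsigned long tp_value;   /* software copy, saved/restored on switch */
    };

    static unsigned long hw_tls_reg;  /* stand-in for tpidrro_el0 */

    /* Hypothetical helper mirroring the set_tls path above. */
    static void set_tls(struct thread_shadow *t, unsigned long val)
    {
        t->tp_value = val;  /* 1. update the shadow state first           */
        barrier();          /* 2. forbid reordering across this point     */
        hw_tls_reg = val;   /* 3. only then touch the "hardware" register */
    }

    int main(void)
    {
        struct thread_shadow t = { 0 };
        set_tls(&t, 0xdead);
        printf("shadow=%#lx hw=%#lx\n", t.tp_value, hw_tls_reg);
        return 0;
    }

Without the barrier the compiler may sink the shadow store below the register write; preemption between the two then restores a stale value on switch-in, which is exactly the corruption the patch comments describe.
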
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
+index b212ae12e5ac..8a0079981cc8 100644
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -458,6 +458,18 @@ static void octeon_halt(void)
+ 	octeon_kill_core(NULL);
+ }
+ 
++static char __read_mostly octeon_system_type[80];
++
++static int __init init_octeon_system_type(void)
++{
++	snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
++		cvmx_board_type_to_string(octeon_bootinfo->board_type),
++		octeon_model_get_string(read_c0_prid()));
++
++	return 0;
++}
++early_initcall(init_octeon_system_type);
++
+ /**
+  * Return a string representing the system type
+  *
+@@ -465,11 +477,7 @@ static void octeon_halt(void)
+  */
+ const char *octeon_board_type_string(void)
+ {
+-	static char name[80];
+-	sprintf(name, "%s (%s)",
+-		cvmx_board_type_to_string(octeon_bootinfo->board_type),
+-		octeon_model_get_string(read_c0_prid()));
+-	return name;
++	return octeon_system_type;
+ }
+ 
+ const char *get_system_type(void)
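
The setup.c hunk above trades a per-call sprintf() into a static buffer, which redoes the same formatting on every call and is not reentrant, for a single snprintf() run from an early_initcall(), after which the string is read-only. A rough sketch of the shape of that change (placeholder strings stand in for the board/model lookups):

    #include <stdio.h>

    static char system_type[80];  /* formatted once at boot, read-only after */

    /* The kernel runs this once via early_initcall(). */
    static int init_system_type(void)
    {
        snprintf(system_type, sizeof(system_type), "%s (%s)",
                 "BOARD", "MODEL");  /* placeholders, not the real lookups */
        return 0;
    }

    const char *get_system_type_str(void)
    {
        return system_type;  /* no formatting, no shared-buffer race */
    }

    int main(void)
    {
        init_system_type();
        puts(get_system_type_str());
        return 0;
    }
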
+diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
+index 5b5ddb231f26..78f18436cdf2 100644
+--- a/arch/mips/kernel/irq-gic.c
++++ b/arch/mips/kernel/irq-gic.c
+@@ -255,11 +255,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+ 
+ 	/* Setup Intr to Pin mapping */
+ 	if (pin & GIC_MAP_TO_NMI_MSK) {
++		int i;
++
+ 		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+ 		/* FIXME: hack to route NMI to all cpu's */
+-		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
++		for (i = 0; i < NR_CPUS; i += 32) {
+ 			GICWRITE(GIC_REG_ADDR(SHARED,
+-					  GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
++					  GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
+ 				 0xffffffff);
+ 		}
+ 	} else {
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 8ae1ebef8b71..5404cab551f3 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -162,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ 		__get_user(fregs[i], i + (__u64 __user *) data);
+ 
+ 	__get_user(child->thread.fpu.fcr31, data + 64);
++	child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ 
+ 	/* FIR may not be written.  */
+ 
+@@ -452,7 +453,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			break;
+ #endif
+ 		case FPC_CSR:
+-			child->thread.fpu.fcr31 = data;
++			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
+ 			break;
+ 		case DSP_BASE ... DSP_BASE + 5: {
+ 			dspreg_t *dregs;
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index c369a5d35527..b897dde93e7a 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -605,7 +605,6 @@ static void emulate_load_store_insn(struct pt_regs *regs,
+ 	case sdc1_op:
+ 		die_if_kernel("Unaligned FP access in kernel code", regs);
+ 		BUG_ON(!used_math());
+-		BUG_ON(!is_fpu_owner());
+ 
+ 		lose_fpu(1);	/* Save FPU state for the emulator. */
+ 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 9bb3a9363b06..db7a050f5c2c 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -1333,6 +1333,7 @@ static void build_r4000_tlb_refill_handler(void)
+ 	}
+ #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ 	uasm_l_tlb_huge_update(&l, p);
++	UASM_i_LW(&p, K0, 0, K1);
+ 	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
+ 	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
+ 				   htlb_info.restore_scratch);
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index d8a455ede5a7..fec8bf97d806 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
+ 
+ /* ========================================================[ return ] === */
+ 
++_resume_userspace:
++	DISABLE_INTERRUPTS(r3,r4)
++	l.lwz	r4,TI_FLAGS(r10)
++	l.andi	r13,r4,_TIF_WORK_MASK
++	l.sfeqi	r13,0
++	l.bf	_restore_all
++	 l.nop
++
+ _work_pending:
+-	/*
+-	 * if (current_thread_info->flags & _TIF_NEED_RESCHED)
+-	 *     schedule();
+-	 */
+-	l.lwz   r5,TI_FLAGS(r10)
+-	l.andi	r3,r5,_TIF_NEED_RESCHED
+-	l.sfnei r3,0
+-	l.bnf   _work_notifysig
++	l.lwz	r5,PT_ORIG_GPR11(r1)
++	l.sfltsi r5,0
++	l.bnf	1f
+ 	 l.nop
+-	l.jal   schedule
++	l.andi	r5,r5,0
++1:
++	l.jal	do_work_pending
++	 l.ori	r3,r1,0			/* pt_regs */
++
++	l.sfeqi	r11,0
++	l.bf	_restore_all
+ 	 l.nop
+-	l.j	_resume_userspace
++	l.sfltsi r11,0
++	l.bnf	1f
+ 	 l.nop
+-
+-/* Handle pending signals and notify-resume requests.
+- * do_notify_resume must be passed the latest pushed pt_regs, not
+- * necessarily the "userspace" ones.  Also, pt_regs->syscallno
+- * must be set so that the syscall restart functionality works.
+- */
+-_work_notifysig:
+-	l.jal	do_notify_resume
+-	 l.ori	r3,r1,0		  /* pt_regs */
+-
+-_resume_userspace:
+-	DISABLE_INTERRUPTS(r3,r4)
+-	l.lwz	r3,TI_FLAGS(r10)
+-	l.andi	r3,r3,_TIF_WORK_MASK
+-	l.sfnei	r3,0
+-	l.bf	_work_pending
++	l.and	r11,r11,r0
++	l.ori	r11,r11,__NR_restart_syscall
++	l.j	_syscall_check_trace_enter
+ 	 l.nop
++1:
++	l.lwz	r11,PT_ORIG_GPR11(r1)
++	/* Restore arg registers */
++	l.lwz	r3,PT_GPR3(r1)
++	l.lwz	r4,PT_GPR4(r1)
++	l.lwz	r5,PT_GPR5(r1)
++	l.lwz	r6,PT_GPR6(r1)
++	l.lwz	r7,PT_GPR7(r1)
++	l.j	_syscall_check_trace_enter
++	 l.lwz	r8,PT_GPR8(r1)
+ 
+ _restore_all:
+ 	RESTORE_ALL
+diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
+index ae167f7e081a..c277ec82783d 100644
+--- a/arch/openrisc/kernel/signal.c
++++ b/arch/openrisc/kernel/signal.c
+@@ -28,24 +28,24 @@
+ #include <linux/tracehook.h>
+ 
+ #include <asm/processor.h>
++#include <asm/syscall.h>
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ 
+ #define DEBUG_SIG 0
+ 
+ struct rt_sigframe {
+-	struct siginfo *pinfo;
+-	void *puc;
+ 	struct siginfo info;
+ 	struct ucontext uc;
+ 	unsigned char retcode[16];	/* trampoline code */
+ };
+ 
+-static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
++static int restore_sigcontext(struct pt_regs *regs,
++			      struct sigcontext __user *sc)
+ {
+-	unsigned int err = 0;
++	int err = 0;
+ 
+-	/* Alwys make any pending restarted system call return -EINTR */
++	/* Always make any pending restarted system calls return -EINTR */
+ 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ 
+ 	/*
+@@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+ 	 * (sc is already checked for VERIFY_READ since the sigframe was
+ 	 *  checked in sys_sigreturn previously)
+ 	 */
+-	if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
+-		goto badframe;
+-	if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
+-		goto badframe;
+-	if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
+-		goto badframe;
++	err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
++	err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
++	err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
+ 
+ 	/* make sure the SM-bit is cleared so user-mode cannot fool us */
+ 	regs->sr &= ~SPR_SR_SM;
+ 
++	regs->orig_gpr11 = -1;	/* Avoid syscall restart checks */
++
+ 	/* TODO: the other ports use regs->orig_XX to disable syscall checks
+ 	 * after this completes, but we don't use that mechanism. maybe we can
+ 	 * use it now ?
+ 	 */
+ 
+ 	return err;
+-
+-badframe:
+-	return 1;
+ }
+ 
+ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
+@@ -111,21 +107,18 @@ badframe:
+  * Set up a signal frame.
+  */
+ 
+-static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+-			    unsigned long mask)
++static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+ {
+ 	int err = 0;
+ 
+ 	/* copy the regs */
+-
++	/* There should be no need to save callee-saved registers here...
++	 * ...but we save them anyway.  Revisit this
++	 */
+ 	err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
+ 	err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
+ 	err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
+ 
+-	/* then some other stuff */
+-
+-	err |= __put_user(mask, &sc->oldmask);
+-
+ 	return err;
+ }
+ 
+@@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 	int err = 0;
+ 
+ 	frame = get_sigframe(ka, regs, sizeof(*frame));
+-
+ 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ 		goto give_sigsegv;
+ 
+-	err |= __put_user(&frame->info, &frame->pinfo);
+-	err |= __put_user(&frame->uc, &frame->puc);
+-
++	/* Create siginfo.  */
+ 	if (ka->sa.sa_flags & SA_SIGINFO)
+ 		err |= copy_siginfo_to_user(&frame->info, info);
+-	if (err)
+-		goto give_sigsegv;
+ 
+-	/* Clear all the bits of the ucontext we don't use.  */
+-	err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
++	/* Create the ucontext.  */
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __put_user(NULL, &frame->uc.uc_link);
+ 	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+-	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
++	err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
+ 
+ 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ 
+@@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 
+ 	/* trampoline - the desired return ip is the retcode itself */
+ 	return_ip = (unsigned long)&frame->retcode;
+-	/* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
+-	err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+-	err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2));
++	/* This is:
++		l.ori r11,r0,__NR_sigreturn
++		l.sys 1
++	 */
++	err |= __put_user(0xa960,             (short *)(frame->retcode + 0));
++	err |= __put_user(__NR_rt_sigreturn,  (short *)(frame->retcode + 2));
+ 	err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+ 	err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+ 
+@@ -262,82 +252,106 @@ handle_signal(unsigned long sig,
+  * mode below.
+  */
+ 
+-void do_signal(struct pt_regs *regs)
++int do_signal(struct pt_regs *regs, int syscall)
+ {
+ 	siginfo_t info;
+ 	int signr;
+ 	struct k_sigaction ka;
+-
+-	/*
+-	 * We want the common case to go fast, which
+-	 * is why we may in certain cases get here from
+-	 * kernel mode. Just return without doing anything
+-	 * if so.
+-	 */
+-	if (!user_mode(regs))
+-		return;
+-
+-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+-
+-	/* If we are coming out of a syscall then we need
+-	 * to check if the syscall was interrupted and wants to be
+-	 * restarted after handling the signal.  If so, the original
+-	 * syscall number is put back into r11 and the PC rewound to
+-	 * point at the l.sys instruction that resulted in the
+-	 * original syscall.  Syscall results other than the four
+-	 * below mean that the syscall executed to completion and no
+-	 * restart is necessary.
+-	 */
+-	if (regs->orig_gpr11) {
+-		int restart = 0;
+-
+-		switch (regs->gpr[11]) {
++	unsigned long continue_addr = 0;
++	unsigned long restart_addr = 0;
++	unsigned long retval = 0;
++	int restart = 0;
++
++	if (syscall) {
++		continue_addr = regs->pc;
++		restart_addr = continue_addr - 4;
++		retval = regs->gpr[11];
++
++		/*
++		 * Setup syscall restart here so that a debugger will
++		 * see the already changed PC.
++		 */
++		switch (retval) {
+ 		case -ERESTART_RESTARTBLOCK:
++			restart = -2;
++			/* Fall through */
+ 		case -ERESTARTNOHAND:
+-			/* Restart if there is no signal handler */
+-			restart = (signr <= 0);
+-			break;
+ 		case -ERESTARTSYS:
+-			/* Restart if there no signal handler or
+-			 * SA_RESTART flag is set */
+-			restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART));
+-			break;
+ 		case -ERESTARTNOINTR:
+-			/* Always restart */
+-			restart = 1;
++			restart++;
++			regs->gpr[11] = regs->orig_gpr11;
++			regs->pc = restart_addr;
+ 			break;
+ 		}
++	}
+ 
+-		if (restart) {
+-			if (regs->gpr[11] == -ERESTART_RESTARTBLOCK)
+-				regs->gpr[11] = __NR_restart_syscall;
+-			else
+-				regs->gpr[11] = regs->orig_gpr11;
+-			regs->pc -= 4;
+-		} else {
+-			regs->gpr[11] = -EINTR;
++	/*
++	 * Get the signal to deliver.  When running under ptrace, at this
++	 * point the debugger may change all our registers ...
++	 */
++	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
++	/*
++	 * Depending on the signal settings we may need to revert the
++	 * decision to restart the system call.  But skip this if a
++	 * debugger has chosen to restart at a different PC.
++	 */
++	if (signr > 0) {
++		if (unlikely(restart) && regs->pc == restart_addr) {
++			if (retval == -ERESTARTNOHAND ||
++			    retval == -ERESTART_RESTARTBLOCK
++			    || (retval == -ERESTARTSYS
++			        && !(ka.sa.sa_flags & SA_RESTART))) {
++				/* No automatic restart */
++				regs->gpr[11] = -EINTR;
++				regs->pc = continue_addr;
++			}
+ 		}
+-	}
+ 
+-	if (signr <= 0) {
+-		/* no signal to deliver so we just put the saved sigmask
+-		 * back */
+-		restore_saved_sigmask();
+-	} else {		/* signr > 0 */
+-		/* Whee!  Actually deliver the signal.  */
+ 		handle_signal(signr, &info, &ka, regs);
++	} else {
++		/* no handler */
++		restore_saved_sigmask();
++		/*
++		 * Restore pt_regs PC as syscall restart will be handled by
++		 * kernel without return to userspace
++		 */
++		if (unlikely(restart) && regs->pc == restart_addr) {
++			regs->pc = continue_addr;
++			return restart;
++		}
+ 	}
+ 
+-	return;
++	return 0;
+ }
+ 
+-asmlinkage void do_notify_resume(struct pt_regs *regs)
++asmlinkage int
++do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+-	if (current_thread_info()->flags & _TIF_SIGPENDING)
+-		do_signal(regs);
+-
+-	if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
+-		clear_thread_flag(TIF_NOTIFY_RESUME);
+-		tracehook_notify_resume(regs);
+-	}
++	do {
++		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++			schedule();
++		} else {
++			if (unlikely(!user_mode(regs)))
++				return 0;
++			local_irq_enable();
++			if (thread_flags & _TIF_SIGPENDING) {
++				int restart = do_signal(regs, syscall);
++				if (unlikely(restart)) {
++					/*
++					 * Restart without handlers.
++					 * Deal with it without leaving
++					 * the kernel space.
++					 */
++					return restart;
++				}
++				syscall = 0;
++			} else {
++				clear_thread_flag(TIF_NOTIFY_RESUME);
++				tracehook_notify_resume(regs);
++			}
++		}
++		local_irq_disable();
++		thread_flags = current_thread_info()->flags;
++	} while (thread_flags & _TIF_WORK_MASK);
++	return 0;
+ }
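
The reworked do_signal() above centralizes the usual syscall-restart protocol: on syscall exit, a return value of -ERESTARTSYS, -ERESTARTNOHAND, -ERESTARTNOINTR or -ERESTART_RESTARTBLOCK provisionally rewinds the PC to the l.sys instruction (so a ptrace debugger observes the post-restart state), and the decision is reverted to -EINTR once the signal disposition is known, unless restart is permitted. The decision table, reduced to a standalone C sketch (simplified; the value definitions mirror the kernel-internal errno extensions):

    #include <errno.h>
    #include <stdbool.h>

    #define ERESTARTSYS           512
    #define ERESTARTNOINTR        513
    #define ERESTARTNOHAND        514
    #define ERESTART_RESTARTBLOCK 516

    /*
     * Given a syscall's return value and the signal disposition, return 1
     * to restart the syscall, -EINTR to fail it, 0 if it simply completed.
     */
    long restart_decision(long retval, bool has_handler, bool sa_restart)
    {
        switch (retval) {
        case -ERESTARTNOINTR:           /* always restarted */
            return 1;
        case -ERESTARTSYS:              /* restarted unless a handler runs
                                           without SA_RESTART set */
            return (!has_handler || sa_restart) ? 1 : -EINTR;
        case -ERESTARTNOHAND:           /* restarted only when no handler
                                           runs; RESTARTBLOCK goes through
                                           __NR_restart_syscall, as the
                                           entry.S hunk loads into r11 */
        case -ERESTART_RESTARTBLOCK:
            return has_handler ? -EINTR : 1;
        default:
            return 0;
        }
    }
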
+diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
+index 8b480901165a..3a52b9b01133 100644
+--- a/arch/powerpc/include/asm/machdep.h
++++ b/arch/powerpc/include/asm/machdep.h
+@@ -57,10 +57,10 @@ struct machdep_calls {
+ 	void            (*hpte_removebolted)(unsigned long ea,
+ 					     int psize, int ssize);
+ 	void		(*flush_hash_range)(unsigned long number, int local);
+-	void		(*hugepage_invalidate)(struct mm_struct *mm,
++	void		(*hugepage_invalidate)(unsigned long vsid,
++					       unsigned long addr,
+ 					       unsigned char *hpte_slot_array,
+-					       unsigned long addr, int psize);
+-
++					       int psize, int ssize);
+ 	/* special for kexec, to be called in real mode, linear mapping is
+ 	 * destroyed as well */
+ 	void		(*hpte_clear_all)(void);
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 46db09414a10..832a39d042d4 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -409,7 +409,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
+ }
+ 
+ extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+-				   pmd_t *pmdp);
++				   pmd_t *pmdp, unsigned long old_pmd);
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
+index d836d945068d..9ecede1e124c 100644
+--- a/arch/powerpc/include/asm/pte-hash64-64k.h
++++ b/arch/powerpc/include/asm/pte-hash64-64k.h
+@@ -46,11 +46,31 @@
+  * in order to deal with 64K made of 4K HW pages. Thus we override the
+  * generic accessors and iterators here
+  */
+-#define __real_pte(e,p) 	((real_pte_t) { \
+-			(e), (pte_val(e) & _PAGE_COMBO) ? \
+-				(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
+-#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
+-        (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
++#define __real_pte __real_pte
++static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
++{
++	real_pte_t rpte;
++
++	rpte.pte = pte;
++	rpte.hidx = 0;
++	if (pte_val(pte) & _PAGE_COMBO) {
++		/*
++		 * Make sure we order the hidx load against the _PAGE_COMBO
++		 * check. The store side ordering is done in __hash_page_4K
++		 */
++		smp_rmb();
++		rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
++	}
++	return rpte;
++}
++
++static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
++{
++	if ((pte_val(rpte.pte) & _PAGE_COMBO))
++		return (rpte.hidx >> (index<<2)) & 0xf;
++	return (pte_val(rpte.pte) >> 12) & 0xf;
++}
++
+ #define __rpte_to_pte(r)	((r).pte)
+ #define __rpte_sub_valid(rpte, index) \
+ 	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
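
The pte-hash64-64k.h hunk above turns a racy macro into an inline function with an explicit smp_rmb() between testing _PAGE_COMBO and loading the hidx array; the comment notes the pairing smp_wmb() on the store side (added in the hugepage-hash64.c hunk further down, just before the PMD's busy bit is cleared). The same publish/consume shape, as a generic C11 sketch rather than PowerPC code:

    #include <stdatomic.h>

    struct record {
        int payload;           /* plays the role of the hidx array */
        atomic_int published;  /* plays the role of _PAGE_COMBO    */
    };

    /* Writer: fill in the data, then publish the flag (release store,
     * roughly what smp_wmb() before the flag store achieves). */
    void publish(struct record *r, int value)
    {
        r->payload = value;
        atomic_store_explicit(&r->published, 1, memory_order_release);
    }

    /* Reader: check the flag, then read the data (acquire load, roughly
     * the flag check followed by smp_rmb()). */
    int consume(struct record *r, int *out)
    {
        if (!atomic_load_explicit(&r->published, memory_order_acquire))
            return 0;          /* not published: caller takes the fallback */
        *out = r->payload;     /* guaranteed to see the writer's value     */
        return 1;
    }

Without the read-side barrier, the hidx load may be speculated ahead of the _PAGE_COMBO check and observe pre-publication garbage even though the flag test passes.
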
+diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
+index c33d939120c9..9ca9c160dee4 100644
+--- a/arch/powerpc/mm/hash_native_64.c
++++ b/arch/powerpc/mm/hash_native_64.c
+@@ -413,18 +413,18 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
+ 	local_irq_restore(flags);
+ }
+ 
+-static void native_hugepage_invalidate(struct mm_struct *mm,
++static void native_hugepage_invalidate(unsigned long vsid,
++				       unsigned long addr,
+ 				       unsigned char *hpte_slot_array,
+-				       unsigned long addr, int psize)
++				       int psize, int ssize)
+ {
+-	int ssize = 0, i;
+-	int lock_tlbie;
++	int i;
+ 	struct hash_pte *hptep;
+ 	int actual_psize = MMU_PAGE_16M;
+ 	unsigned int max_hpte_count, valid;
+ 	unsigned long flags, s_addr = addr;
+ 	unsigned long hpte_v, want_v, shift;
+-	unsigned long hidx, vpn = 0, vsid, hash, slot;
++	unsigned long hidx, vpn = 0, hash, slot;
+ 
+ 	shift = mmu_psize_defs[psize].shift;
+ 	max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -438,15 +438,6 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+ 
+ 		/* get the vpn */
+ 		addr = s_addr + (i * (1ul << shift));
+-		if (!is_kernel_addr(addr)) {
+-			ssize = user_segment_size(addr);
+-			vsid = get_vsid(mm->context.id, addr, ssize);
+-			WARN_ON(vsid == 0);
+-		} else {
+-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-			ssize = mmu_kernel_ssize;
+-		}
+-
+ 		vpn = hpt_vpn(addr, vsid, ssize);
+ 		hash = hpt_hash(vpn, shift, ssize);
+ 		if (hidx & _PTEIDX_SECONDARY)
+@@ -466,22 +457,13 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
+ 		else
+ 			/* Invalidate the hpte. NOTE: this also unlocks it */
+ 			hptep->v = 0;
++		/*
++		 * We need to do tlb invalidate for all the address, tlbie
++		 * instruction compares entry_VA in tlb with the VA specified
++		 * here
++		 */
++		tlbie(vpn, psize, actual_psize, ssize, 0);
+ 	}
+-	/*
+-	 * Since this is a hugepage, we just need a single tlbie.
+-	 * use the last vpn.
+-	 */
+-	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+-	if (lock_tlbie)
+-		raw_spin_lock(&native_tlbie_lock);
+-
+-	asm volatile("ptesync":::"memory");
+-	__tlbie(vpn, psize, actual_psize, ssize);
+-	asm volatile("eieio; tlbsync; ptesync":::"memory");
+-
+-	if (lock_tlbie)
+-		raw_spin_unlock(&native_tlbie_lock);
+-
+ 	local_irq_restore(flags);
+ }
+ 
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 34de9e0cdc34..7d86c868040d 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -18,6 +18,57 @@
+ #include <linux/mm.h>
+ #include <asm/machdep.h>
+ 
++static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
++				pmd_t *pmdp, unsigned int psize, int ssize)
++{
++	int i, max_hpte_count, valid;
++	unsigned long s_addr;
++	unsigned char *hpte_slot_array;
++	unsigned long hidx, shift, vpn, hash, slot;
++
++	s_addr = addr & HPAGE_PMD_MASK;
++	hpte_slot_array = get_hpte_slot_array(pmdp);
++	/*
++	 * IF we try to do a HUGE PTE update after a withdraw is done.
++	 * we will find the below NULL. This happens when we do
++	 * split_huge_page_pmd
++	 */
++	if (!hpte_slot_array)
++		return;
++
++	if (ppc_md.hugepage_invalidate)
++		return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
++						  psize, ssize);
++	/*
++	 * No bluk hpte removal support, invalidate each entry
++	 */
++	shift = mmu_psize_defs[psize].shift;
++	max_hpte_count = HPAGE_PMD_SIZE >> shift;
++	for (i = 0; i < max_hpte_count; i++) {
++		/*
++		 * 8 bits per each hpte entries
++		 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
++		 */
++		valid = hpte_valid(hpte_slot_array, i);
++		if (!valid)
++			continue;
++		hidx =  hpte_hash_index(hpte_slot_array, i);
++
++		/* get the vpn */
++		addr = s_addr + (i * (1ul << shift));
++		vpn = hpt_vpn(addr, vsid, ssize);
++		hash = hpt_hash(vpn, shift, ssize);
++		if (hidx & _PTEIDX_SECONDARY)
++			hash = ~hash;
++
++		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
++		slot += hidx & _PTEIDX_GROUP_IX;
++		ppc_md.hpte_invalidate(slot, vpn, psize,
++				       MMU_PAGE_16M, ssize, 0);
++	}
++}
++
++
+ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 		    pmd_t *pmdp, unsigned long trap, int local, int ssize,
+ 		    unsigned int psize)
+@@ -33,7 +84,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	 * atomically mark the linux large page PMD busy and dirty
+ 	 */
+ 	do {
+-		old_pmd = pmd_val(*pmdp);
++		pmd_t pmd = ACCESS_ONCE(*pmdp);
++
++		old_pmd = pmd_val(pmd);
+ 		/* If PMD busy, retry the access */
+ 		if (unlikely(old_pmd & _PAGE_BUSY))
+ 			return 0;
+@@ -85,6 +138,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	vpn = hpt_vpn(ea, vsid, ssize);
+ 	hash = hpt_hash(vpn, shift, ssize);
+ 	hpte_slot_array = get_hpte_slot_array(pmdp);
++	if (psize == MMU_PAGE_4K) {
++		/*
++		 * invalidate the old hpte entry if we have that mapped via 64K
++		 * base page size. This is because demote_segment won't flush
++		 * hash page table entries.
++		 */
++		if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
++			invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize);
++	}
+ 
+ 	valid = hpte_valid(hpte_slot_array, index);
+ 	if (valid) {
+@@ -107,11 +169,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 			 * safely update this here.
+ 			 */
+ 			valid = 0;
+-			new_pmd &= ~_PAGE_HPTEFLAGS;
+ 			hpte_slot_array[index] = 0;
+-		} else
+-			/* clear the busy bits and set the hash pte bits */
+-			new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++		}
+ 	}
+ 
+ 	if (!valid) {
+@@ -119,15 +178,13 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 
+ 		/* insert new entry */
+ 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+-repeat:
+-		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+-
+-		/* clear the busy bits and set the hash pte bits */
+-		new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
++		new_pmd |= _PAGE_HASHPTE;
+ 
+ 		/* Add in WIMG bits */
+ 		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+ 				      _PAGE_COHERENT | _PAGE_GUARDED));
++repeat:
++		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ 
+ 		/* Insert into the hash table, primary slot */
+ 		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+@@ -168,8 +225,17 @@ repeat:
+ 		mark_hpte_slot_valid(hpte_slot_array, index, slot);
+ 	}
+ 	/*
+-	 * No need to use ldarx/stdcx here
++	 * Mark the pte with _PAGE_COMBO, if we are trying to hash it with
++	 * base page size 4k.
++	 */
++	if (psize == MMU_PAGE_4K)
++		new_pmd |= _PAGE_COMBO;
++	/*
++	 * The hpte valid is stored in the pgtable whose address is in the
++	 * second half of the PMD. Order this against clearing of the busy bit in
++	 * huge pmd.
+ 	 */
++	smp_wmb();
+ 	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
+ 	return 0;
+ }
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 14c05547bd74..e91079b796d2 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -589,8 +589,8 @@ static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
+ 	case CPU_UP_CANCELED:
+ 	case CPU_UP_CANCELED_FROZEN:
+ 		unmap_cpu_from_node(lcpu);
+-		break;
+ 		ret = NOTIFY_OK;
++		break;
+ #endif
+ 	}
+ 	return ret;
+diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
+index 536eec72c0f7..c9379a2d6006 100644
+--- a/arch/powerpc/mm/pgtable_64.c
++++ b/arch/powerpc/mm/pgtable_64.c
+@@ -524,7 +524,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
+ 	*pmdp = __pmd(old & ~clr);
+ #endif
+ 	if (old & _PAGE_HASHPTE)
+-		hpte_do_hugepage_flush(mm, addr, pmdp);
++		hpte_do_hugepage_flush(mm, addr, pmdp, old);
+ 	return old;
+ }
+ 
+@@ -631,7 +631,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
+ 	if (!(old & _PAGE_SPLITTING)) {
+ 		/* We need to flush the hpte */
+ 		if (old & _PAGE_HASHPTE)
+-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
++			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
+ 	}
+ }
+ 
+@@ -704,7 +704,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+  * neesd to be flushed.
+  */
+ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+-			    pmd_t *pmdp)
++			    pmd_t *pmdp, unsigned long old_pmd)
+ {
+ 	int ssize, i;
+ 	unsigned long s_addr;
+@@ -726,12 +726,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ 	if (!hpte_slot_array)
+ 		return;
+ 
+-	/* get the base page size */
++	/* get the base page size,vsid and segment size */
++#ifdef CONFIG_DEBUG_VM
+ 	psize = get_slice_psize(mm, s_addr);
++	BUG_ON(psize == MMU_PAGE_16M);
++#endif
++	if (old_pmd & _PAGE_COMBO)
++		psize = MMU_PAGE_4K;
++	else
++		psize = MMU_PAGE_64K;
++
++	if (!is_kernel_addr(s_addr)) {
++		ssize = user_segment_size(s_addr);
++		vsid = get_vsid(mm->context.id, s_addr, ssize);
++		WARN_ON(vsid == 0);
++	} else {
++		vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize);
++		ssize = mmu_kernel_ssize;
++	}
+ 
+ 	if (ppc_md.hugepage_invalidate)
+-		return ppc_md.hugepage_invalidate(mm, hpte_slot_array,
+-						  s_addr, psize);
++		return ppc_md.hugepage_invalidate(vsid, s_addr,
++						  hpte_slot_array,
++						  psize, ssize);
+ 	/*
+ 	 * No bluk hpte removal support, invalidate each entry
+ 	 */
+@@ -749,15 +766,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+ 
+ 		/* get the vpn */
+ 		addr = s_addr + (i * (1ul << shift));
+-		if (!is_kernel_addr(addr)) {
+-			ssize = user_segment_size(addr);
+-			vsid = get_vsid(mm->context.id, addr, ssize);
+-			WARN_ON(vsid == 0);
+-		} else {
+-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-			ssize = mmu_kernel_ssize;
+-		}
+-
+ 		vpn = hpt_vpn(addr, vsid, ssize);
+ 		hash = hpt_hash(vpn, shift, ssize);
+ 		if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
+index 36e44b4260eb..c66e445d9890 100644
+--- a/arch/powerpc/mm/tlb_hash64.c
++++ b/arch/powerpc/mm/tlb_hash64.c
+@@ -217,7 +217,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+ 		if (!(pte & _PAGE_HASHPTE))
+ 			continue;
+ 		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+-			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
++			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
+ 		else
+ 			hpte_need_flush(mm, start, ptep, pte, 0);
+ 	}
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 9a432de363b8..bebe64ed5dc3 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np)
+ static inline int pseries_remove_memblock(unsigned long base,
+ 					  unsigned int memblock_size)
+ {
+-	return -EOPNOTSUPP;
++	return 0;
+ }
+ static inline int pseries_remove_memory(struct device_node *np)
+ {
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 0307901e4132..261c5095d5d3 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -731,13 +731,13 @@ static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u6
+ 			np->full_name, ret, ddw_avail[2], liobn);
+ }
+ 
+-static void remove_ddw(struct device_node *np)
++static void remove_ddw(struct device_node *np, bool remove_prop)
+ {
+ 	struct dynamic_dma_window_prop *dwp;
+ 	struct property *win64;
+ 	const u32 *ddw_avail;
+ 	u64 liobn;
+-	int len, ret;
++	int len, ret = 0;
+ 
+ 	ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len);
+ 	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
+@@ -763,7 +763,8 @@ static void remove_ddw(struct device_node *np)
+ 	__remove_ddw(np, ddw_avail, liobn);
+ 
+ delprop:
+-	ret = of_remove_property(np, win64);
++	if (remove_prop)
++		ret = of_remove_property(np, win64);
+ 	if (ret)
+ 		pr_warning("%s: failed to remove direct window property: %d\n",
+ 			np->full_name, ret);
+@@ -835,7 +836,7 @@ static int find_existing_ddw_windows(void)
+ 		 * can clear the table or find the holes. To that end,
+ 		 * first, remove any existing DDW configuration.
+ 		 */
+-		remove_ddw(pdn);
++		remove_ddw(pdn, true);
+ 
+ 		/*
+ 		 * Second, if we are running on a new enough level of
+@@ -1125,7 +1126,7 @@ out_free_window:
+ 	kfree(window);
+ 
+ out_clear_window:
+-	remove_ddw(pdn);
++	remove_ddw(pdn, true);
+ 
+ out_free_prop:
+ 	kfree(win64->name);
+@@ -1337,7 +1338,14 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
+ 
+ 	switch (action) {
+ 	case OF_RECONFIG_DETACH_NODE:
+-		remove_ddw(np);
++		/*
++		 * Removing the property will invoke the reconfig
++		 * notifier again, which causes dead-lock on the
++		 * read-write semaphore of the notifier chain. So
++		 * we have to remove the property when releasing
++		 * the device node.
++		 */
++		remove_ddw(np, false);
+ 		if (pci && pci->iommu_table)
+ 			iommu_free_table(pci->iommu_table, np->full_name);
+ 
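
The remove_ddw() change above is a re-entrancy fix: of_remove_property() fires the same OF reconfig notifier chain that remove_ddw() is already executing from, so taking the chain's read-write semaphore a second time on that path deadlocks. Making the property removal conditional defers it to the node-release path, which runs without the chain lock held. The defensive shape, in a self-contained C sketch (hypothetical node type and callbacks, for illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { int id; };

    /* Split teardown, mirroring remove_ddw(np, remove_prop). */
    static void teardown_window(struct node *np, bool remove_prop)
    {
        printf("node %d: unmap window%s\n", np->id,
               remove_prop ? " + remove property" : "");
    }

    /* Runs with the notifier chain's rwsem held: must not re-trigger it. */
    static void reconfig_notifier(struct node *np)
    {
        teardown_window(np, false);  /* skip the step that fires the chain */
    }

    /* Runs at node release, with no chain lock held: finish the job. */
    static void node_release(struct node *np)
    {
        teardown_window(np, true);
    }

    int main(void)
    {
        struct node n = { 7 };
        reconfig_notifier(&n);
        node_release(&n);
        return 0;
    }
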
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 356bc75ca74f..691a479f7d97 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -412,16 +412,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
+ }
+ 
+-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+-				       unsigned char *hpte_slot_array,
+-				       unsigned long addr, int psize)
++static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
++					     unsigned long addr,
++					     unsigned char *hpte_slot_array,
++					     int psize, int ssize)
+ {
+-	int ssize = 0, i, index = 0;
++	int i, index = 0;
+ 	unsigned long s_addr = addr;
+ 	unsigned int max_hpte_count, valid;
+ 	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
+ 	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
+-	unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
++	unsigned long shift, hidx, vpn = 0, hash, slot;
+ 
+ 	shift = mmu_psize_defs[psize].shift;
+ 	max_hpte_count = 1U << (PMD_SHIFT - shift);
+@@ -434,15 +435,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
+ 
+ 		/* get the vpn */
+ 		addr = s_addr + (i * (1ul << shift));
+-		if (!is_kernel_addr(addr)) {
+-			ssize = user_segment_size(addr);
+-			vsid = get_vsid(mm->context.id, addr, ssize);
+-			WARN_ON(vsid == 0);
+-		} else {
+-			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
+-			ssize = mmu_kernel_ssize;
+-		}
+-
+ 		vpn = hpt_vpn(addr, vsid, ssize);
+ 		hash = hpt_hash(vpn, shift, ssize);
+ 		if (hidx & _PTEIDX_SECONDARY)
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 6671e8db1861..faa97bd4948e 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -93,6 +93,7 @@ config S390
+ 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
+ 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
++	select ARCH_SUPPORTS_ATOMIC_RMW
+ 	select ARCH_USE_CMPXCHG_LOCKREF
+ 	select ARCH_WANT_IPC_PARSE_VERSION
+ 	select BUILDTIME_EXTABLE_SORT
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 0ea10f27d613..cb6cfcd034cf 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
++extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+ extern void irq_force_complete_move(int);
+ #endif
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 22d0687e7fda..39100783cf26 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -262,6 +262,83 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
+ EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
++
++/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
++ * below, which is protected by stop_machine().  Putting them on the stack
++ * results in a stack frame overflow.  Dynamically allocating could result in a
++ * failure so declare these two cpumasks as global.
++ */
++static struct cpumask affinity_new, online_new;
++
++/*
++ * This cpu is going to be removed and its vectors migrated to the remaining
++ * online cpus.  Check to see if there are enough vectors in the remaining cpus.
++ * This function is protected by stop_machine().
++ */
++int check_irq_vectors_for_cpu_disable(void)
++{
++	int irq, cpu;
++	unsigned int this_cpu, vector, this_count, count;
++	struct irq_desc *desc;
++	struct irq_data *data;
++
++	this_cpu = smp_processor_id();
++	cpumask_copy(&online_new, cpu_online_mask);
++	cpu_clear(this_cpu, online_new);
++
++	this_count = 0;
++	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
++		irq = __this_cpu_read(vector_irq[vector]);
++		if (irq >= 0) {
++			desc = irq_to_desc(irq);
++			data = irq_desc_get_irq_data(desc);
++			cpumask_copy(&affinity_new, data->affinity);
++			cpu_clear(this_cpu, affinity_new);
++
++			/* Do not count inactive or per-cpu irqs. */
++			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
++				continue;
++
++			/*
++			 * A single irq may be mapped to multiple
++			 * cpu's vector_irq[] (for example IOAPIC cluster
++			 * mode).  In this case we have two
++			 * possibilities:
++			 *
++			 * 1) the resulting affinity mask is empty; that is
++			 * this the down'd cpu is the last cpu in the irq's
++			 * affinity mask, or
++			 *
++			 * 2) the resulting affinity mask is no longer
++			 * a subset of the online cpus but the affinity
++			 * mask is not zero; that is the down'd cpu is the
++			 * last online cpu in a user set affinity mask.
++			 */
++			if (cpumask_empty(&affinity_new) ||
++			    !cpumask_subset(&affinity_new, &online_new))
++				this_count++;
++		}
++	}
++
++	count = 0;
++	for_each_online_cpu(cpu) {
++		if (cpu == this_cpu)
++			continue;
++		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
++		     vector++) {
++			if (per_cpu(vector_irq, cpu)[vector] < 0)
++				count++;
++		}
++	}
++
++	if (count < this_count) {
++		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
++			this_cpu, this_count, count);
++		return -ERANGE;
++	}
++	return 0;
++}
++
+ /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
+ void fixup_irqs(void)
+ {
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 42c26a485533..b17dfe212233 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1317,6 +1317,12 @@ void cpu_disable_common(void)
+ 
+ int native_cpu_disable(void)
+ {
++	int ret;
++
++	ret = check_irq_vectors_for_cpu_disable();
++	if (ret)
++		return ret;
++
+ 	clear_local_APIC();
+ 
+ 	cpu_disable_common();
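
Taken together, the two x86 hunks above make CPU offlining fail early with -ERANGE when the surviving CPUs lack enough free interrupt vectors to absorb the dying CPU's IRQs, instead of silently losing interrupts during fixup_irqs(). The accounting, reduced to a simplified C sketch (must_move[] stands in for the affinity-mask analysis the real function performs):

    #include <errno.h>

    #define NR_VECTORS 256

    /*
     * Count vectors this CPU must hand off versus free slots elsewhere.
     * vector_irq[cpu][v] < 0 means "slot v on that cpu is unused".
     */
    int check_vectors(int this_cpu, int ncpus,
                      int vector_irq[][NR_VECTORS],
                      const unsigned char must_move[NR_VECTORS])
    {
        unsigned needed = 0, freeslots = 0;
        int cpu, v;

        for (v = 0; v < NR_VECTORS; v++)
            if (vector_irq[this_cpu][v] >= 0 && must_move[v])
                needed++;           /* irq has no other online target */

        for (cpu = 0; cpu < ncpus; cpu++) {
            if (cpu == this_cpu)
                continue;
            for (v = 0; v < NR_VECTORS; v++)
                if (vector_irq[cpu][v] < 0)
                    freeslots++;    /* room to receive a migrated irq */
        }

        return (freeslots < needed) ? -ERANGE : 0;
    }
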
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index d8f80e733cf8..a573d4bd71d9 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -866,6 +866,13 @@ void blkcg_drain_queue(struct request_queue *q)
+ 	if (!q->root_blkg)
+ 		return;
+ 
++	/*
++	 * @q could be exiting and already have destroyed all blkgs as
++	 * indicated by NULL root_blkg.  If so, don't confuse policies.
++	 */
++	if (!q->root_blkg)
++		return;
++
+ 	blk_throtl_drain(q);
+ }
+ 
+diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
+index 1731c27c36a6..2cac1d1f3863 100644
+--- a/drivers/acpi/acpica/utcopy.c
++++ b/drivers/acpi/acpica/utcopy.c
+@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
+ 		status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
+ 	}
+ 
++	/* Delete the allocated object if copy failed */
++
++	if (ACPI_FAILURE(status)) {
++		acpi_ut_remove_reference(*dest_desc);
++	}
++
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index c7414a545a4f..2a4ae32c4b97 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -1099,9 +1099,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ 
+ 	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
+ 
+-		cpuidle_pause_and_lock();
+ 		/* Protect against cpu-hotplug */
+ 		get_online_cpus();
++		cpuidle_pause_and_lock();
+ 
+ 		/* Disable all cpuidle devices */
+ 		for_each_online_cpu(cpu) {
+@@ -1128,8 +1128,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ 				cpuidle_enable_device(dev);
+ 			}
+ 		}
+-		put_online_cpus();
+ 		cpuidle_resume_and_unlock();
++		put_online_cpus();
+ 	}
+ 
+ 	return 0;
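
The processor_idle.c hunks above repair a lock-ordering inversion: get_online_cpus() now nests outside cpuidle_pause_and_lock(), and the releases mirror the acquisitions in reverse, matching how other paths nest the two. The discipline in a userspace sketch (pthread mutexes standing in for the kernel primitives named in the comments):

    #include <pthread.h>

    static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER; /* "A" */
    static pthread_mutex_t cpuidle_lock = PTHREAD_MUTEX_INITIALIZER; /* "B" */

    /*
     * Every path must take A before B and release in reverse order;
     * two threads mixing the order is the classic ABBA deadlock.
     */
    void update_cstates(void)
    {
        pthread_mutex_lock(&hotplug_lock);    /* get_online_cpus()        */
        pthread_mutex_lock(&cpuidle_lock);    /* cpuidle_pause_and_lock() */

        /* ... disable, reprogram and re-enable the idle devices ... */

        pthread_mutex_unlock(&cpuidle_lock);  /* cpuidle_resume_and_unlock() */
        pthread_mutex_unlock(&hotplug_lock);  /* put_online_cpus()           */
    }
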
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index efa328bf6724..a875de67fb7c 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -304,6 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
++	{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
++	{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
++	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -441,6 +449,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
++	  .driver_data = board_ahci_yes_fbs },			/* 88se9182 */
++	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 */
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+ 	  .driver_data = board_ahci_yes_fbs },			/* 88se9172 on some Gigabyte */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 0d9a2f674819..5d0bc51bafea 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4227,7 +4227,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+-	{ "Crucial_CT???M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
++	{ "Crucial_CT*M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+ 
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
+index f35f15f4d83e..f7badaa39eb6 100644
+--- a/drivers/ata/pata_scc.c
++++ b/drivers/ata/pata_scc.c
+@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
+  *	Note: Original code is ata_bus_softreset().
+  */
+ 
+-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
++static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+                                       unsigned long deadline)
+ {
+ 	struct ata_ioports *ioaddr = &ap->ioaddr;
+@@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ 	udelay(20);
+ 	out_be32(ioaddr->ctl_addr, ap->ctl);
+ 
+-	scc_wait_after_reset(&ap->link, devmask, deadline);
+-
+-	return 0;
++	return scc_wait_after_reset(&ap->link, devmask, deadline);
+ }
+ 
+ /**
+@@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
+ {
+ 	struct ata_port *ap = link->ap;
+ 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+-	unsigned int devmask = 0, err_mask;
++	unsigned int devmask = 0;
++	int rc;
+ 	u8 err;
+ 
+ 	DPRINTK("ENTER\n");
+@@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
+ 
+ 	/* issue bus reset */
+ 	DPRINTK("about to softreset, devmask=%x\n", devmask);
+-	err_mask = scc_bus_softreset(ap, devmask, deadline);
+-	if (err_mask) {
+-		ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
++	rc = scc_bus_softreset(ap, devmask, deadline);
++	if (rc) {
++		ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index e3c974a6c522..48138b311460 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -533,11 +533,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
+ int tpm_get_timeouts(struct tpm_chip *chip)
+ {
+ 	struct tpm_cmd_t tpm_cmd;
+-	struct timeout_t *timeout_cap;
++	unsigned long new_timeout[4];
++	unsigned long old_timeout[4];
+ 	struct duration_t *duration_cap;
+ 	ssize_t rc;
+-	u32 timeout;
+-	unsigned int scale = 1;
+ 
+ 	tpm_cmd.header.in = tpm_getcap_header;
+ 	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+@@ -571,25 +570,46 @@ int tpm_get_timeouts(struct tpm_chip *chip)
+ 	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ 		return -EINVAL;
+ 
+-	timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
+-	/* Don't overwrite default if value is 0 */
+-	timeout = be32_to_cpu(timeout_cap->a);
+-	if (timeout && timeout < 1000) {
+-		/* timeouts in msec rather usec */
+-		scale = 1000;
+-		chip->vendor.timeout_adjusted = true;
++	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
++	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
++	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
++	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
++	memcpy(new_timeout, old_timeout, sizeof(new_timeout));
++
++	/*
++	 * Provide ability for vendor overrides of timeout values in case
++	 * of misreporting.
++	 */
++	if (chip->vendor.update_timeouts != NULL)
++		chip->vendor.timeout_adjusted =
++			chip->vendor.update_timeouts(chip, new_timeout);
++
++	if (!chip->vendor.timeout_adjusted) {
++		/* Don't overwrite default if value is 0 */
++		if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
++			int i;
++
++			/* timeouts in msec rather usec */
++			for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
++				new_timeout[i] *= 1000;
++			chip->vendor.timeout_adjusted = true;
++		}
+ 	}
+-	if (timeout)
+-		chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
+-	timeout = be32_to_cpu(timeout_cap->b);
+-	if (timeout)
+-		chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
+-	timeout = be32_to_cpu(timeout_cap->c);
+-	if (timeout)
+-		chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
+-	timeout = be32_to_cpu(timeout_cap->d);
+-	if (timeout)
+-		chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
++
++	/* Report adjusted timeouts */
++	if (chip->vendor.timeout_adjusted) {
++		dev_info(chip->dev,
++			 HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
++			 old_timeout[0], new_timeout[0],
++			 old_timeout[1], new_timeout[1],
++			 old_timeout[2], new_timeout[2],
++			 old_timeout[3], new_timeout[3]);
++	}
++
++	chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
++	chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
++	chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
++	chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
+ 
+ duration:
+ 	tpm_cmd.header.in = tpm_getcap_header;
+@@ -1423,13 +1443,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ 	int err, total = 0, retries = 5;
+ 	u8 *dest = out;
+ 
++	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
++		return -EINVAL;
++
+ 	chip = tpm_chip_find_get(chip_num);
+ 	if (chip == NULL)
+ 		return -ENODEV;
+ 
+-	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+-		return -EINVAL;
+-
+ 	do {
+ 		tpm_cmd.header.in = tpm_getrandom_header;
+ 		tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+@@ -1448,6 +1468,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ 		num_bytes -= recd;
+ 	} while (retries-- && total < max);
+ 
++	tpm_chip_put(chip);
+ 	return total ? total : -EIO;
+ }
+ EXPORT_SYMBOL_GPL(tpm_get_random);
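
Two leaks are closed in tpm_get_random() above: the argument check is hoisted before tpm_chip_find_get(), since the early -EINVAL return used to skip the put and leak the chip reference, and a matching tpm_chip_put() is added before the normal return. The validate/acquire/release shape it converges on, as a generic sketch (the chip helpers below are toy stand-ins, not the TPM API):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    struct chip { int refs; };
    static struct chip the_chip;

    static struct chip *chip_get(int num)
    {
        (void)num;
        the_chip.refs++;           /* find + take a reference */
        return &the_chip;
    }

    static void chip_put(struct chip *c)
    {
        c->refs--;                 /* drop the reference */
    }

    static int chip_read(struct chip *c, unsigned char *out, size_t n)
    {
        (void)c;
        memset(out, 0xab, n);      /* pretend to fetch random bytes */
        return (int)n;
    }

    int get_random_bytes_from(int num, unsigned char *out, size_t max)
    {
        struct chip *c;
        int ret;

        if (!out || !max)          /* validate before taking a reference */
            return -EINVAL;

        c = chip_get(num);
        if (!c)
            return -ENODEV;

        ret = chip_read(c, out, max);
        chip_put(c);               /* every exit past the get drops it */
        return ret;
    }
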
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index a7bfc176ed43..b911d79fbd58 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -95,6 +95,9 @@ struct tpm_vendor_specific {
+ 	int (*send) (struct tpm_chip *, u8 *, size_t);
+ 	void (*cancel) (struct tpm_chip *);
+ 	u8 (*status) (struct tpm_chip *);
++	bool (*update_timeouts)(struct tpm_chip *chip,
++				unsigned long *timeout_cap);
++
+ 	void (*release) (struct device *);
+ 	struct miscdevice miscdev;
+ 	struct attribute_group *attr_group;
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 5796d0157ce0..e7b1a0ae4300 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -373,6 +373,36 @@ out_err:
+ 	return rc;
+ }
+ 
++struct tis_vendor_timeout_override {
++	u32 did_vid;
++	unsigned long timeout_us[4];
++};
++
++static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
++	/* Atmel 3204 */
++	{ 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
++			(TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
++};
++
++static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
++				    unsigned long *timeout_cap)
++{
++	int i;
++	u32 did_vid;
++
++	did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
++
++	for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
++		if (vendor_timeout_overrides[i].did_vid != did_vid)
++			continue;
++		memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
++		       sizeof(vendor_timeout_overrides[i].timeout_us));
++		return true;
++	}
++
++	return false;
++}
++
+ /*
+  * Early probing for iTPM with STS_DATA_EXPECT flaw.
+  * Try sending command without itpm flag set and if that
+@@ -475,6 +505,7 @@ static struct tpm_vendor_specific tpm_tis = {
+ 	.recv = tpm_tis_recv,
+ 	.send = tpm_tis_send,
+ 	.cancel = tpm_tis_ready,
++	.update_timeouts = tpm_tis_update_timeouts,
+ 	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_canceled = tpm_tis_req_canceled,
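
The tpm_tis hunks plug into the ->update_timeouts() hook introduced in tpm.h/tpm.c above: a small const table keyed by the chip's DID/VID lets known-misreporting parts (here the Atmel 3204) substitute sane timeouts for the values the hardware advertises. The lookup idiom, as a standalone sketch (the table entry is an example, not authoritative hardware data):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct timeout_override {
        uint32_t did_vid;             /* device/vendor id to match */
        unsigned long timeout_us[4];  /* replacement timeouts A..D */
    };

    static const struct timeout_override overrides[] = {
        { 0x32041114, { 750000, 2000000, 750000, 750000 } },
    };

    /* Returns true and fills out[] when a quirk entry matches the chip. */
    bool apply_override(uint32_t did_vid, unsigned long out[4])
    {
        size_t i;

        for (i = 0; i != sizeof(overrides) / sizeof(overrides[0]); i++) {
            if (overrides[i].did_vid != did_vid)
                continue;
            memcpy(out, overrides[i].timeout_us,
                   sizeof(overrides[i].timeout_us));
            return true;
        }
        return false;
    }

Keeping the quirk table in the bus driver and only a function pointer in the core means new workarounds need no changes to tpm_get_timeouts() itself.
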
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index b22659cccca4..e6125522860a 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
+  */
+ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+ {
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	list_del(&entry->list);
+ 	spin_unlock_irq(&__efivars->lock);
+@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
+ 	const struct efivar_operations *ops = __efivars->ops;
+ 	efi_status_t status;
+ 
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	status = ops->set_variable(entry->var.VariableName,
+ 				   &entry->var.VendorGuid,
+@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ 	int strsize1, strsize2;
+ 	bool found = false;
+ 
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	list_for_each_entry_safe(entry, n, head, list) {
+ 		strsize1 = ucs2_strsize(name, 1024);
+@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ 	const struct efivar_operations *ops = __efivars->ops;
+ 	efi_status_t status;
+ 
+-	WARN_ON(!spin_is_locked(&__efivars->lock));
++	lockdep_assert_held(&__efivars->lock);
+ 
+ 	status = ops->get_variable(entry->var.VariableName,
+ 				   &entry->var.VendorGuid,
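
The efivars hunks replace WARN_ON(!spin_is_locked(...)) with lockdep_assert_held(). The distinction matters: spin_is_locked() asks whether anyone holds the lock, which is both racy and always false on uniprocessor builds where spinlocks compile away, while lockdep_assert_held() asserts that the current context owns it and costs nothing with lockdep disabled. A userspace illustration of the "caller must hold the lock" contract (an owner field stands in for lockdep, which has no pthread equivalent):

    #include <assert.h>
    #include <pthread.h>

    struct owned_lock {
        pthread_mutex_t m;
        pthread_t owner;
        int held;
    };

    static struct owned_lock list_lock = { .m = PTHREAD_MUTEX_INITIALIZER };

    static void ol_lock(struct owned_lock *l)
    {
        pthread_mutex_lock(&l->m);
        l->owner = pthread_self();
        l->held = 1;
    }

    static void ol_unlock(struct owned_lock *l)
    {
        l->held = 0;
        pthread_mutex_unlock(&l->m);
    }

    /* Asks "does *this* thread hold it?", not "is it locked by anyone?". */
    static void assert_held(struct owned_lock *l)
    {
        assert(l->held && pthread_equal(l->owner, pthread_self()));
    }

    /* Analogue of __efivar_entry_delete(): relies on the caller's lock. */
    static void list_del_locked(void)
    {
        assert_held(&list_lock);
        /* ... mutate the shared list ... */
    }

    int main(void)
    {
        ol_lock(&list_lock);
        list_del_locked();
        ol_unlock(&list_lock);
        return 0;
    }
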
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 7507fe036b6e..1ceb95a3bbe0 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -423,6 +423,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ 		}
+ 	}
+ 
++	/* Enforce ordering by reading HEAD register back */
++	I915_READ_HEAD(ring);
++
+ 	/* Initialize the ring. This must happen _after_ we've cleared the ring
+ 	 * registers with the above sequence (the readback of the HEAD registers
+ 	 * also enforces ordering), otherwise the hw might lose the new ring
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 85ef9ff42aa6..9d9770d201ae 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -4769,12 +4769,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
+ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+ {
+ 	struct radeon_ring *ring = &rdev->ring[ridx];
++	int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
+ 
+ 	if (vm == NULL)
+ 		return;
+ 
+ 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
++	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+ 				 WRITE_DATA_DST_SEL(0)));
+ 	if (vm->id < 8) {
+ 		radeon_ring_write(ring,
+@@ -4833,7 +4834,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+ 	radeon_ring_write(ring, 1 << vm->id);
+ 
+ 	/* compute doesn't have PFP */
+-	if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
++	if (usepfp) {
+ 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
+ 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ 		radeon_ring_write(ring, 0x0);
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index dfa641277175..402d4630d13e 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
+ 	"adm1032",
+ 	"adm1030",
+ 	"max6649",
+-	"lm64",
++	"lm63", /* lm64 */
+ 	"f75375",
+ 	"asc7xxx",
+ };
+@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
+ 	"adm1032",
+ 	"adm1030",
+ 	"max6649",
+-	"lm64",
++	"lm63", /* lm64 */
+ 	"f75375",
+ 	"RV6xx",
+ 	"RV770",
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 4d41a0dc1796..53769e9cf595 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -4757,7 +4757,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+ 
+ 	/* write new base address */
+ 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+-	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
++	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ 				 WRITE_DATA_DST_SEL(0)));
+ 
+ 	if (vm->id < 8) {
+diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
+index d700698a1f22..bf980ea2b593 100644
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1868,7 +1868,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
+ 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+ 		pi->at[i] = TRINITY_AT_DFLT;
+ 
+-	pi->enable_bapm = false;
++	/* There are stability issues reported on with
++	 * bapm enabled when switching between AC and battery
++	 * power.  At the same time, some MSI boards hang
++	 * if it's not enabled and dpm is enabled.  Just enable
++	 * it for MSI boards right now.
++	 */
++	if (rdev->pdev->subsystem_vendor == 0x1462)
++		pi->enable_bapm = true;
++	else
++		pi->enable_bapm = false;
+ 	pi->enable_nbps_policy = true;
+ 	pi->enable_sclk_ds = true;
+ 	pi->enable_gfx_power_gating = true;
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+index 116da199b942..af1b17a0db66 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+@@ -122,6 +122,7 @@ static int tilcdc_unload(struct drm_device *dev)
+ 	struct tilcdc_drm_private *priv = dev->dev_private;
+ 	struct tilcdc_module *mod, *cur;
+ 
++	drm_fbdev_cma_fini(priv->fbdev);
+ 	drm_kms_helper_poll_fini(dev);
+ 	drm_mode_config_cleanup(dev);
+ 	drm_vblank_cleanup(dev);
+@@ -628,10 +629,10 @@ static int __init tilcdc_drm_init(void)
+ static void __exit tilcdc_drm_fini(void)
+ {
+ 	DBG("fini");
+-	tilcdc_tfp410_fini();
+-	tilcdc_slave_fini();
+-	tilcdc_panel_fini();
+ 	platform_driver_unregister(&tilcdc_platform_driver);
++	tilcdc_panel_fini();
++	tilcdc_slave_fini();
++	tilcdc_tfp410_fini();
+ }
+ 
+ late_initcall(tilcdc_drm_init);
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+index 86c67329b605..b085dcc54fb5 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+@@ -151,6 +151,7 @@ struct panel_connector {
+ static void panel_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct panel_connector *panel_connector = to_panel_connector(connector);
++	drm_sysfs_connector_remove(connector);
+ 	drm_connector_cleanup(connector);
+ 	kfree(panel_connector);
+ }
+@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
+ {
+ 	struct panel_module *panel_mod = to_panel_module(mod);
+ 
+-	if (panel_mod->timings) {
++	if (panel_mod->timings)
+ 		display_timings_release(panel_mod->timings);
+-		kfree(panel_mod->timings);
+-	}
+ 
+ 	tilcdc_module_cleanup(mod);
+ 	kfree(panel_mod->info);
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+index 595068ba2d5e..2f83ffb7f37e 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+@@ -166,6 +166,7 @@ struct slave_connector {
+ static void slave_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct slave_connector *slave_connector = to_slave_connector(connector);
++	drm_sysfs_connector_remove(connector);
+ 	drm_connector_cleanup(connector);
+ 	kfree(slave_connector);
+ }
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+index c38b56b268ac..ce75ac8de4f8 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+@@ -167,6 +167,7 @@ struct tfp410_connector {
+ static void tfp410_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
++	drm_sysfs_connector_remove(connector);
+ 	drm_connector_cleanup(connector);
+ 	kfree(tfp410_connector);
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 863bef9f9234..cf4bad2c1d59 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+  *
+  * @pool: to free the pages from
+  * @free_all: If set to true will free all pages in pool
++ * @gfp: GFP flags.
+  **/
+-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
++static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
++			      gfp_t gfp)
+ {
+ 	unsigned long irq_flags;
+ 	struct page *p;
+@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+ 	if (NUM_PAGES_TO_ALLOC < nr_free)
+ 		npages_to_free = NUM_PAGES_TO_ALLOC;
+ 
+-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+-			GFP_KERNEL);
++	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ 	if (!pages_to_free) {
+ 		pr_err("Failed to allocate memory for pool free operation\n");
+ 		return 0;
+@@ -382,32 +383,35 @@ out:
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  that means
+- * this can deadlock when called a sc->gfp_mask that is not equal to
+- * GFP_KERNEL.
++ * We need to pass sc->gfp_mask to ttm_page_pool_free().
+  *
+  * This code is crying out for a shrinker per pool....
+  */
+ static unsigned long
+ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	static atomic_t start_pool = ATOMIC_INIT(0);
++	static DEFINE_MUTEX(lock);
++	static unsigned start_pool;
+ 	unsigned i;
+-	unsigned pool_offset = atomic_add_return(1, &start_pool);
++	unsigned pool_offset;
+ 	struct ttm_page_pool *pool;
+ 	int shrink_pages = sc->nr_to_scan;
+ 	unsigned long freed = 0;
+ 
+-	pool_offset = pool_offset % NUM_POOLS;
++	if (!mutex_trylock(&lock))
++		return SHRINK_STOP;
++	pool_offset = ++start_pool % NUM_POOLS;
+ 	/* select start pool in round robin fashion */
+ 	for (i = 0; i < NUM_POOLS; ++i) {
+ 		unsigned nr_free = shrink_pages;
+ 		if (shrink_pages == 0)
+ 			break;
+ 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
+-		shrink_pages = ttm_page_pool_free(pool, nr_free);
++		shrink_pages = ttm_page_pool_free(pool, nr_free,
++						  sc->gfp_mask);
+ 		freed += nr_free - shrink_pages;
+ 	}
++	mutex_unlock(&lock);
+ 	return freed;
+ }
+ 
+@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+ 	}
+ 	spin_unlock_irqrestore(&pool->lock, irq_flags);
+ 	if (npages)
+-		ttm_page_pool_free(pool, npages);
++		ttm_page_pool_free(pool, npages, GFP_KERNEL);
+ }
+ 
+ /*
+@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
+ 	ttm_pool_mm_shrink_fini(_manager);
+ 
+ 	for (i = 0; i < NUM_POOLS; ++i)
+-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
++		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
++				   GFP_KERNEL);
+ 
+ 	kobject_put(&_manager->kobj);
+ 	_manager = NULL;
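
The ttm shrinker changes above combine two ideas that are easier to see outside the driver: reclaim must never block (hence mutex_trylock and the SHRINK_STOP bailout), and the starting pool rotates via a static cursor so repeated scans spread the work. The gfp plumbing serves the same goal — the temporary array used while freeing must be allocated with the caller's reclaim-safe sc->gfp_mask, not a blanket GFP_KERNEL. Below is a minimal userspace sketch of that control flow; the pool contents, NUM_POOLS value and the shrink_scan() name are invented stand-ins, and a pthread mutex models the kernel mutex.

#include <pthread.h>
#include <stdio.h>

#define NUM_POOLS 4
#define SHRINK_STOP (~0UL)

static unsigned long pool_pages[NUM_POOLS] = { 8, 3, 5, 2 };

static unsigned long shrink_scan(unsigned long nr_to_scan)
{
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned start_pool;
	unsigned long freed = 0;
	unsigned i, first;

	/* Never block in reclaim: a contended lock means another
	 * shrink is in flight, so report "no progress possible now". */
	if (pthread_mutex_trylock(&lock))
		return SHRINK_STOP;

	first = ++start_pool % NUM_POOLS;	/* round-robin start */
	for (i = 0; i < NUM_POOLS && nr_to_scan; i++) {
		unsigned idx = (i + first) % NUM_POOLS;
		unsigned long take = pool_pages[idx] < nr_to_scan ?
				     pool_pages[idx] : nr_to_scan;

		pool_pages[idx] -= take;
		nr_to_scan -= take;
		freed += take;
	}

	pthread_mutex_unlock(&lock);
	return freed;
}

int main(void)
{
	printf("freed %lu\n", shrink_scan(10));	/* drains pools 1,2,3 */
	printf("freed %lu\n", shrink_scan(10));	/* starts one pool later */
	return 0;
}
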
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index 7957beeeaf73..ae86e3513631 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -410,8 +410,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+  *
+  * @pool: to free the pages from
+  * @nr_free: If set to true will free all pages in pool
++ * @gfp: GFP flags.
+  **/
+-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
++static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
++				       gfp_t gfp)
+ {
+ 	unsigned long irq_flags;
+ 	struct dma_page *dma_p, *tmp;
+@@ -429,8 +431,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+ 			 npages_to_free, nr_free);
+ 	}
+ #endif
+-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+-			GFP_KERNEL);
++	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+ 
+ 	if (!pages_to_free) {
+ 		pr_err("%s: Failed to allocate memory for pool free operation\n",
+@@ -529,7 +530,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+ 		if (pool->type != type)
+ 			continue;
+ 		/* Takes a spinlock.. */
+-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
++		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
+ 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ 		/* This code path is called after _all_ references to the
+ 		 * struct device has been dropped - so nobody should be
+@@ -982,7 +983,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+ 
+ 	/* shrink pool if necessary (only on !is_cached pools)*/
+ 	if (npages)
+-		ttm_dma_page_pool_free(pool, npages);
++		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
+ 	ttm->state = tt_unpopulated;
+ }
+ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+@@ -992,10 +993,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+  *
+  * XXX: (dchinner) Deadlock warning!
+  *
+- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
+- * bad.
++ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
+  *
+  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
+  * shrinkers
+@@ -1003,9 +1001,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+ static unsigned long
+ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	static atomic_t start_pool = ATOMIC_INIT(0);
++	static unsigned start_pool;
+ 	unsigned idx = 0;
+-	unsigned pool_offset = atomic_add_return(1, &start_pool);
++	unsigned pool_offset;
+ 	unsigned shrink_pages = sc->nr_to_scan;
+ 	struct device_pools *p;
+ 	unsigned long freed = 0;
+@@ -1013,8 +1011,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 	if (list_empty(&_manager->pools))
+ 		return SHRINK_STOP;
+ 
+-	mutex_lock(&_manager->lock);
+-	pool_offset = pool_offset % _manager->npools;
++	if (!mutex_trylock(&_manager->lock))
++		return SHRINK_STOP;
++	if (!_manager->npools)
++		goto out;
++	pool_offset = ++start_pool % _manager->npools;
+ 	list_for_each_entry(p, &_manager->pools, pools) {
+ 		unsigned nr_free;
+ 
+@@ -1026,13 +1027,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 		if (++idx < pool_offset)
+ 			continue;
+ 		nr_free = shrink_pages;
+-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
++		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
++						      sc->gfp_mask);
+ 		freed += nr_free - shrink_pages;
+ 
+ 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ 			 p->pool->dev_name, p->pool->name, current->pid,
+ 			 nr_free, shrink_pages);
+ 	}
++out:
+ 	mutex_unlock(&_manager->lock);
+ 	return freed;
+ }
+@@ -1043,7 +1046,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ 	struct device_pools *p;
+ 	unsigned long count = 0;
+ 
+-	mutex_lock(&_manager->lock);
++	if (!mutex_trylock(&_manager->lock))
++		return 0;
+ 	list_for_each_entry(p, &_manager->pools, pools)
+ 		count += p->pool->npages_free;
+ 	mutex_unlock(&_manager->lock);
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index c47c2034ca71..4293e89bbbdd 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -46,6 +46,7 @@
+ #include <linux/completion.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/sysctl.h>
+ 
+ #include <rdma/iw_cm.h>
+ #include <rdma/ib_addr.h>
+@@ -65,6 +66,20 @@ struct iwcm_work {
+ 	struct list_head free_list;
+ };
+ 
++static unsigned int default_backlog = 256;
++
++static struct ctl_table_header *iwcm_ctl_table_hdr;
++static struct ctl_table iwcm_ctl_table[] = {
++	{
++		.procname	= "default_backlog",
++		.data		= &default_backlog,
++		.maxlen		= sizeof(default_backlog),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec,
++	},
++	{ }
++};
++
+ /*
+  * The following services provide a mechanism for pre-allocating iwcm_work
+  * elements.  The design pre-allocates them  based on the cm_id type:
+@@ -419,6 +434,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
+ 
+ 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+ 
++	if (!backlog)
++		backlog = default_backlog;
++
+ 	ret = alloc_work_entries(cm_id_priv, backlog);
+ 	if (ret)
+ 		return ret;
+@@ -1024,11 +1042,20 @@ static int __init iw_cm_init(void)
+ 	if (!iwcm_wq)
+ 		return -ENOMEM;
+ 
++	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
++						 iwcm_ctl_table);
++	if (!iwcm_ctl_table_hdr) {
++		pr_err("iw_cm: couldn't register sysctl paths\n");
++		destroy_workqueue(iwcm_wq);
++		return -ENOMEM;
++	}
++
+ 	return 0;
+ }
+ 
+ static void __exit iw_cm_cleanup(void)
+ {
++	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
+ 	destroy_workqueue(iwcm_wq);
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 024fa025a7ab..15984e1c0b61 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -93,6 +93,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
+ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+ 
+ static struct scsi_transport_template *ib_srp_transport_template;
++static struct workqueue_struct *srp_remove_wq;
+ 
+ static struct ib_client srp_client = {
+ 	.name   = "srp",
+@@ -458,7 +459,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
+ 	spin_unlock_irq(&target->lock);
+ 
+ 	if (changed)
+-		queue_work(system_long_wq, &target->remove_work);
++		queue_work(srp_remove_wq, &target->remove_work);
+ 
+ 	return changed;
+ }
+@@ -2602,9 +2603,10 @@ static void srp_remove_one(struct ib_device *device)
+ 		spin_unlock(&host->target_lock);
+ 
+ 		/*
+-		 * Wait for target port removal tasks.
++		 * Wait for tl_err and target port removal tasks.
+ 		 */
+ 		flush_workqueue(system_long_wq);
++		flush_workqueue(srp_remove_wq);
+ 
+ 		kfree(host);
+ 	}
+@@ -2649,16 +2651,22 @@ static int __init srp_init_module(void)
+ 		indirect_sg_entries = cmd_sg_entries;
+ 	}
+ 
++	srp_remove_wq = create_workqueue("srp_remove");
++	if (IS_ERR(srp_remove_wq)) {
++		ret = PTR_ERR(srp_remove_wq);
++		goto out;
++	}
++
++	ret = -ENOMEM;
+ 	ib_srp_transport_template =
+ 		srp_attach_transport(&ib_srp_transport_functions);
+ 	if (!ib_srp_transport_template)
+-		return -ENOMEM;
++		goto destroy_wq;
+ 
+ 	ret = class_register(&srp_class);
+ 	if (ret) {
+ 		pr_err("couldn't register class infiniband_srp\n");
+-		srp_release_transport(ib_srp_transport_template);
+-		return ret;
++		goto release_tr;
+ 	}
+ 
+ 	ib_sa_register_client(&srp_sa_client);
+@@ -2666,13 +2674,22 @@ static int __init srp_init_module(void)
+ 	ret = ib_register_client(&srp_client);
+ 	if (ret) {
+ 		pr_err("couldn't register IB client\n");
+-		srp_release_transport(ib_srp_transport_template);
+-		ib_sa_unregister_client(&srp_sa_client);
+-		class_unregister(&srp_class);
+-		return ret;
++		goto unreg_sa;
+ 	}
+ 
+-	return 0;
++out:
++	return ret;
++
++unreg_sa:
++	ib_sa_unregister_client(&srp_sa_client);
++	class_unregister(&srp_class);
++
++release_tr:
++	srp_release_transport(ib_srp_transport_template);
++
++destroy_wq:
++	destroy_workqueue(srp_remove_wq);
++	goto out;
+ }
+ 
+ static void __exit srp_cleanup_module(void)
+@@ -2681,6 +2698,7 @@ static void __exit srp_cleanup_module(void)
+ 	ib_sa_unregister_client(&srp_sa_client);
+ 	class_unregister(&srp_class);
+ 	srp_release_transport(ib_srp_transport_template);
++	destroy_workqueue(srp_remove_wq);
+ }
+ 
+ module_init(srp_init_module);
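
The srp_init_module() rework above is the standard kernel error-unwinding shape: one label per acquired resource, releasing in reverse order of acquisition, so each new failure path needs only a single goto. A compact standalone rendering of that pattern follows; the acquire()/release() helpers and resource names are placeholders, not the SRP driver's real objects (the patch's variant parks the success return behind an out: label, but the unwinding order is the same).

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to acquire %s\n", what);
		return NULL;
	}
	printf("acquired %s\n", what);
	return malloc(1);
}

static void release(const char *what, void *res)
{
	printf("released %s\n", what);
	free(res);
}

static int init_sketch(int fail_at)
{
	void *wq, *transport, *client;
	int ret = -1;

	wq = acquire("workqueue", fail_at == 1);
	if (!wq)
		goto out;

	transport = acquire("transport", fail_at == 2);
	if (!transport)
		goto destroy_wq;

	client = acquire("client", fail_at == 3);
	if (!client)
		goto release_transport;

	return 0;		/* fully registered; keep everything */

release_transport:
	release("transport", transport);
destroy_wq:
	release("workqueue", wq);
out:
	return ret;
}

int main(void)
{
	return init_sketch(3) ? 1 : 0;	/* exercise the deepest failure */
}
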
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 22f656e125dd..67644e960592 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3227,14 +3227,16 @@ free_domains:
+ 
+ static void cleanup_domain(struct protection_domain *domain)
+ {
+-	struct iommu_dev_data *dev_data, *next;
++	struct iommu_dev_data *entry;
+ 	unsigned long flags;
+ 
+ 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ 
+-	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+-		__detach_device(dev_data);
+-		atomic_set(&dev_data->bind, 0);
++	while (!list_empty(&domain->dev_list)) {
++		entry = list_first_entry(&domain->dev_list,
++					 struct iommu_dev_data, list);
++		__detach_device(entry);
++		atomic_set(&entry->bind, 0);
+ 	}
+ 
+ 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
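
The amd_iommu change is subtle: list_for_each_entry_safe() only protects against removal of the *current* entry, but __detach_device() can take additional entries (device aliases) off the list, leaving the iterator's cached next pointer dangling. Re-reading the first entry after every detach sidesteps that. A small userspace model of the hazard, with a hand-rolled singly linked list and a detach() that may remove a second node — all names and the alias mechanism are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	int alias_of;		/* -1, or id of a partner removed with us */
	struct node *next;
};

static struct node *head;

/* Like __detach_device() with an alias: removing one entry may pull a
 * second, unrelated-looking entry off the list as a side effect. */
static void detach(struct node *n)
{
	int id = n->id, alias = n->alias_of;
	struct node **pp = &head;

	while (*pp) {
		struct node *cur = *pp;

		if (cur->id == id || cur->id == alias) {
			*pp = cur->next;
			printf("detached %d\n", cur->id);
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}

static void cleanup_domain(void)
{
	/* Always re-read the current first entry; a cached "next"
	 * pointer could have been freed by the previous detach(). */
	while (head)
		detach(head);
}

static void push(int id, int alias_of)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	n->alias_of = alias_of;
	n->next = head;
	head = n;
}

int main(void)
{
	push(3, -1);
	push(2, -1);
	push(1, 2);		/* detaching 1 also removes 2 */
	cleanup_domain();
	return 0;
}
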
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 66c4aee20c72..9b582c9444f2 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1406,12 +1406,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		mddev->degraded++;
+ 		set_bit(Faulty, &rdev->flags);
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+-		/*
+-		 * if recovery is running, make sure it aborts.
+-		 */
+-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 	} else
+ 		set_bit(Faulty, &rdev->flags);
++	/*
++	 * if recovery is running, make sure it aborts.
++	 */
++	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ 	printk(KERN_ALERT
+ 	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 308575d23550..9ccb107c982e 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1698,13 +1698,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 		return;
+ 	}
+-	if (test_and_clear_bit(In_sync, &rdev->flags)) {
++	if (test_and_clear_bit(In_sync, &rdev->flags))
+ 		mddev->degraded++;
+-			/*
+-		 * if recovery is running, make sure it aborts.
+-		 */
+-		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+-	}
++	/*
++	 * If recovery is running, make sure it aborts.
++	 */
++	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ 	set_bit(Blocked, &rdev->flags);
+ 	set_bit(Faulty, &rdev->flags);
+ 	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+@@ -2970,6 +2969,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ 		 */
+ 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
+ 			end_reshape(conf);
++			close_sync(conf);
+ 			return 0;
+ 		}
+ 
+@@ -4420,7 +4420,7 @@ read_more:
+ 	read_bio->bi_private = r10_bio;
+ 	read_bio->bi_end_io = end_sync_read;
+ 	read_bio->bi_rw = READ;
+-	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
++	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
+ 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
+ 	read_bio->bi_vcnt = 0;
+ 	read_bio->bi_size = 0;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 3ecfb063ec0b..42510e40c23c 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3672,6 +3672,8 @@ static void handle_stripe(struct stripe_head *sh)
+ 				set_bit(R5_Wantwrite, &dev->flags);
+ 				if (prexor)
+ 					continue;
++				if (s.failed > 1)
++					continue;
+ 				if (!test_bit(R5_Insync, &dev->flags) ||
+ 				    ((i == sh->pd_idx || i == sh->qd_idx)  &&
+ 				     s.failed == 0))
+diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
+index f953d33ee151..4bfbd5f463d1 100644
+--- a/drivers/media/common/siano/Kconfig
++++ b/drivers/media/common/siano/Kconfig
+@@ -22,8 +22,7 @@ config SMS_SIANO_DEBUGFS
+ 	bool "Enable debugfs for smsdvb"
+ 	depends on SMS_SIANO_MDTV
+ 	depends on DEBUG_FS
+-	depends on SMS_USB_DRV
+-	depends on CONFIG_SMS_USB_DRV = CONFIG_SMS_SDIO_DRV
++	depends on SMS_USB_DRV = SMS_SDIO_DRV
+ 
+ 	---help---
+ 	  Choose Y to enable visualizing a dump of the frontend
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 703560fa5e73..88c1606fd555 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev,
+ 	if (ent->name) {
+ 		strncpy(u_ent.name, ent->name, sizeof(u_ent.name));
+ 		u_ent.name[sizeof(u_ent.name) - 1] = '\0';
+-	} else {
+-		memset(u_ent.name, 0, sizeof(u_ent.name));
+ 	}
+ 	u_ent.type = ent->type;
+ 	u_ent.revision = ent->revision;
+diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
+index 714c53ef6c11..2960ff1637d1 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.c
++++ b/drivers/media/platform/vsp1/vsp1_video.c
+@@ -622,8 +622,6 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
+ 	if (vb->num_planes < format->num_planes)
+ 		return -EINVAL;
+ 
+-	buf->video = video;
+-
+ 	for (i = 0; i < vb->num_planes; ++i) {
+ 		buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ 		buf->length[i] = vb2_plane_size(vb, i);
+diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
+index d8612a378345..47b7a8ab5e2f 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.h
++++ b/drivers/media/platform/vsp1/vsp1_video.h
+@@ -89,7 +89,6 @@ static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
+ }
+ 
+ struct vsp1_video_buffer {
+-	struct vsp1_video *video;
+ 	struct vb2_buffer buf;
+ 	struct list_head queue;
+ 
+diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
+index 2018befabb5a..e71decbfd0af 100644
+--- a/drivers/media/tuners/xc4000.c
++++ b/drivers/media/tuners/xc4000.c
+@@ -93,7 +93,7 @@ struct xc4000_priv {
+ 	struct firmware_description *firm;
+ 	int	firm_size;
+ 	u32	if_khz;
+-	u32	freq_hz;
++	u32	freq_hz, freq_offset;
+ 	u32	bandwidth;
+ 	u8	video_standard;
+ 	u8	rf_mode;
+@@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ 	case SYS_ATSC:
+ 		dprintk(1, "%s() VSB modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_AIR;
+-		priv->freq_hz = c->frequency - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = XC4000_DTV6;
+ 		type = DTV6;
+ 		break;
+ 	case SYS_DVBC_ANNEX_B:
+ 		dprintk(1, "%s() QAM modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_CABLE;
+-		priv->freq_hz = c->frequency - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = XC4000_DTV6;
+ 		type = DTV6;
+ 		break;
+@@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ 		dprintk(1, "%s() OFDM\n", __func__);
+ 		if (bw == 0) {
+ 			if (c->frequency < 400000000) {
+-				priv->freq_hz = c->frequency - 2250000;
++				priv->freq_offset = 2250000;
+ 			} else {
+-				priv->freq_hz = c->frequency - 2750000;
++				priv->freq_offset = 2750000;
+ 			}
+ 			priv->video_standard = XC4000_DTV7_8;
+ 			type = DTV78;
+ 		} else if (bw <= 6000000) {
+ 			priv->video_standard = XC4000_DTV6;
+-			priv->freq_hz = c->frequency - 1750000;
++			priv->freq_offset = 1750000;
+ 			type = DTV6;
+ 		} else if (bw <= 7000000) {
+ 			priv->video_standard = XC4000_DTV7;
+-			priv->freq_hz = c->frequency - 2250000;
++			priv->freq_offset = 2250000;
+ 			type = DTV7;
+ 		} else {
+ 			priv->video_standard = XC4000_DTV8;
+-			priv->freq_hz = c->frequency - 2750000;
++			priv->freq_offset = 2750000;
+ 			type = DTV8;
+ 		}
+ 		priv->rf_mode = XC_RF_MODE_AIR;
+@@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe)
+ 		goto fail;
+ 	}
+ 
++	priv->freq_hz = c->frequency - priv->freq_offset;
++
+ 	dprintk(1, "%s() frequency=%d (compensated)\n",
+ 		__func__, priv->freq_hz);
+ 
+@@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ 	struct xc4000_priv *priv = fe->tuner_priv;
+ 
+-	*freq = priv->freq_hz;
++	*freq = priv->freq_hz + priv->freq_offset;
+ 
+ 	if (debug) {
+ 		mutex_lock(&priv->lock);
+diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
+index 5cd09a681b6a..b2d9e9cb97f7 100644
+--- a/drivers/media/tuners/xc5000.c
++++ b/drivers/media/tuners/xc5000.c
+@@ -55,7 +55,7 @@ struct xc5000_priv {
+ 
+ 	u32 if_khz;
+ 	u16 xtal_khz;
+-	u32 freq_hz;
++	u32 freq_hz, freq_offset;
+ 	u32 bandwidth;
+ 	u8  video_standard;
+ 	u8  rf_mode;
+@@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 	case SYS_ATSC:
+ 		dprintk(1, "%s() VSB modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_AIR;
+-		priv->freq_hz = freq - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = DTV6;
+ 		break;
+ 	case SYS_DVBC_ANNEX_B:
+ 		dprintk(1, "%s() QAM modulation\n", __func__);
+ 		priv->rf_mode = XC_RF_MODE_CABLE;
+-		priv->freq_hz = freq - 1750000;
++		priv->freq_offset = 1750000;
+ 		priv->video_standard = DTV6;
+ 		break;
+ 	case SYS_ISDBT:
+@@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 		switch (bw) {
+ 		case 6000000:
+ 			priv->video_standard = DTV6;
+-			priv->freq_hz = freq - 1750000;
++			priv->freq_offset = 1750000;
+ 			break;
+ 		case 7000000:
+ 			priv->video_standard = DTV7;
+-			priv->freq_hz = freq - 2250000;
++			priv->freq_offset = 2250000;
+ 			break;
+ 		case 8000000:
+ 			priv->video_standard = DTV8;
+-			priv->freq_hz = freq - 2750000;
++			priv->freq_offset = 2750000;
+ 			break;
+ 		default:
+ 			printk(KERN_ERR "xc5000 bandwidth not set!\n");
+@@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 		priv->rf_mode = XC_RF_MODE_CABLE;
+ 		if (bw <= 6000000) {
+ 			priv->video_standard = DTV6;
+-			priv->freq_hz = freq - 1750000;
++			priv->freq_offset = 1750000;
+ 			b = 6;
+ 		} else if (bw <= 7000000) {
+ 			priv->video_standard = DTV7;
+-			priv->freq_hz = freq - 2250000;
++			priv->freq_offset = 2250000;
+ 			b = 7;
+ 		} else {
+ 			priv->video_standard = DTV7_8;
+-			priv->freq_hz = freq - 2750000;
++			priv->freq_offset = 2750000;
+ 			b = 8;
+ 		}
+ 		dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__,
+@@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe)
+ 		return -EINVAL;
+ 	}
+ 
++	priv->freq_hz = freq - priv->freq_offset;
++
+ 	dprintk(1, "%s() frequency=%d (compensated to %d)\n",
+ 		__func__, freq, priv->freq_hz);
+ 
+@@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+ {
+ 	struct xc5000_priv *priv = fe->tuner_priv;
+ 	dprintk(1, "%s()\n", __func__);
+-	*freq = priv->freq_hz;
++	*freq = priv->freq_hz + priv->freq_offset;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index f6154546b5c0..7ed75efa1c36 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -787,11 +787,27 @@ static int au0828_i2s_init(struct au0828_dev *dev)
+ 
+ /*
+  * Auvitek au0828 analog stream enable
+- * Please set interface0 to AS5 before enable the stream
+  */
+ static int au0828_analog_stream_enable(struct au0828_dev *d)
+ {
++	struct usb_interface *iface;
++	int ret;
++
+ 	dprintk(1, "au0828_analog_stream_enable called\n");
++
++	iface = usb_ifnum_to_if(d->usbdev, 0);
++	if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
++		dprintk(1, "Changing intf#0 to alt 5\n");
++		/* set au0828 interface0 to AS5 here again */
++		ret = usb_set_interface(d->usbdev, 0, 5);
++		if (ret < 0) {
++			printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
++			return -EBUSY;
++		}
++	}
++
++	/* FIXME: size should be calculated using d->width, d->height */
++
+ 	au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00);
+ 	au0828_writereg(d, 0x106, 0x00);
+ 	/* set x position */
+@@ -1002,15 +1018,6 @@ static int au0828_v4l2_open(struct file *filp)
+ 		return -ERESTARTSYS;
+ 	}
+ 	if (dev->users == 0) {
+-		/* set au0828 interface0 to AS5 here again */
+-		ret = usb_set_interface(dev->usbdev, 0, 5);
+-		if (ret < 0) {
+-			mutex_unlock(&dev->lock);
+-			printk(KERN_INFO "Au0828 can't set alternate to 5!\n");
+-			kfree(fh);
+-			return -EBUSY;
+-		}
+-
+ 		au0828_analog_stream_enable(dev);
+ 		au0828_analog_stream_reset(dev);
+ 
+@@ -1252,13 +1259,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd,
+ 		}
+ 	}
+ 
+-	/* set au0828 interface0 to AS5 here again */
+-	ret = usb_set_interface(dev->usbdev, 0, 5);
+-	if (ret < 0) {
+-		printk(KERN_INFO "Au0828 can't set alt setting to 5!\n");
+-		return -EBUSY;
+-	}
+-
+ 	au0828_analog_stream_enable(dev);
+ 
+ 	return 0;
+diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
+index 29ee54d68512..5dd653f9b094 100644
+--- a/drivers/mfd/omap-usb-host.c
++++ b/drivers/mfd/omap-usb-host.c
+@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
+ 
+ 		for (i = 0; i < omap->nports; i++) {
+ 			if (is_ehci_phy_mode(pdata->port_mode[i])) {
+-				reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
++				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
+index 994ca4aff1a3..4b7ea3fb143c 100644
+--- a/drivers/misc/mei/nfc.c
++++ b/drivers/misc/mei/nfc.c
+@@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ 	ndev = (struct mei_nfc_dev *) cldev->priv_data;
+ 	dev = ndev->cl->dev;
+ 
++	err = -ENOMEM;
+ 	mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
+ 	if (!mei_buf)
+-		return -ENOMEM;
++		goto out;
+ 
+ 	hdr = (struct mei_nfc_hci_hdr *) mei_buf;
+ 	hdr->cmd = MEI_NFC_CMD_HCI_SEND;
+@@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ 	hdr->data_size = length;
+ 
+ 	memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
+-
+ 	err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
+ 	if (err < 0)
+-		return err;
+-
+-	kfree(mei_buf);
++		goto out;
+ 
+ 	if (!wait_event_interruptible_timeout(ndev->send_wq,
+ 				ndev->recv_req_id == ndev->req_id, HZ)) {
+@@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
+ 	} else {
+ 		ndev->req_id++;
+ 	}
+-
++out:
++	kfree(mei_buf);
+ 	return err;
+ }
+ 
+diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
+index 19d637266fcd..71e4f6ccae2f 100644
+--- a/drivers/mtd/ftl.c
++++ b/drivers/mtd/ftl.c
+@@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+ 			return;
+ 	}
+ 
+-	ftl_freepart(partition);
+ 	kfree(partition);
+ }
+ 
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index 0332d0b2d73a..854662826272 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -948,7 +948,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ 	u32 val;
+ 
+ 	val = readl(info->reg.gpmc_ecc_config);
+-	if (((val >> ECC_CONFIG_CS_SHIFT)  & ~CS_MASK) != info->gpmc_cs)
++	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+ 		return -EINVAL;
+ 
+ 	/* read ecc result */
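
The omap2 one-liner above is the classic bitfield-read idiom: shift the field down, then AND with the field mask, not its complement — the old "& ~CS_MASK" kept every bit *except* the chip-select field, so the comparison tested unrelated bits. A self-checking sketch with invented shift/mask values (not the GPMC register's real layout):

#include <assert.h>
#include <stdint.h>

#define ECC_CONFIG_CS_SHIFT	1	/* invented for the demo */
#define CS_MASK			0x7	/* 3-bit chip-select field */

/* correct: shift the field down, keep only the field's bits */
static uint32_t get_cs(uint32_t reg)
{
	return (reg >> ECC_CONFIG_CS_SHIFT) & CS_MASK;
}

/* pre-patch shape: keeps every bit *except* the field */
static uint32_t get_cs_buggy(uint32_t reg)
{
	return (reg >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK;
}

int main(void)
{
	uint32_t reg = (5u << ECC_CONFIG_CS_SHIFT) | 1u;

	assert(get_cs(reg) == 5);
	assert(get_cs_buggy(reg) != 5);	/* comparison can never match */
	return 0;
}
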
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 6c0fd8e0f9bf..895b086ec261 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -293,6 +293,18 @@ failure:
+ 	atomic_add(buffers_added, &(pool->available));
+ }
+ 
++/*
++ * The final 8 bytes of the buffer list is a counter of frames dropped
++ * because there was not a buffer in the buffer list capable of holding
++ * the frame.
++ */
++static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
++{
++	__be64 *p = adapter->buffer_list_addr + 4096 - 8;
++
++	adapter->rx_no_buffer = be64_to_cpup(p);
++}
++
+ /* replenish routine */
+ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+ {
+@@ -308,8 +320,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
+ 			ibmveth_replenish_buffer_pool(adapter, pool);
+ 	}
+ 
+-	adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
+-						4096 - 8);
++	ibmveth_update_rx_no_buffer(adapter);
+ }
+ 
+ /* empty and free ana buffer pool - also used to do cleanup in error paths */
+@@ -699,8 +710,7 @@ static int ibmveth_close(struct net_device *netdev)
+ 
+ 	free_irq(netdev->irq, netdev);
+ 
+-	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
+-						4096 - 8);
++	ibmveth_update_rx_no_buffer(adapter);
+ 
+ 	ibmveth_cleanup(adapter);
+ 
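
The new ibmveth_update_rx_no_buffer() helper is really about endianness discipline: the hypervisor writes the drop counter big-endian into the last 8 bytes of the 4 KiB buffer list, so the driver must convert it rather than dereference the raw bytes. The same idea in portable userspace C, with a fabricated buffer, invented helper names, and glibc's be64toh() standing in for the kernel's be64_to_cpup():

#define _DEFAULT_SOURCE		/* for htobe64()/be64toh() on glibc */
#include <endian.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#define BUF_LIST_SIZE 4096

static uint64_t read_rx_no_buffer(const unsigned char *buffer_list)
{
	uint64_t be;

	/* memcpy keeps the sketch free of alignment assumptions */
	memcpy(&be, buffer_list + BUF_LIST_SIZE - 8, sizeof(be));
	return be64toh(be);	/* be64_to_cpup() in the kernel */
}

int main(void)
{
	unsigned char buf[BUF_LIST_SIZE] = { 0 };
	uint64_t wire = htobe64(42);	/* what the hypervisor writes */

	memcpy(buf + BUF_LIST_SIZE - 8, &wire, sizeof(wire));
	printf("rx_no_buffer = %" PRIu64 "\n", read_rx_no_buffer(buf));
	return 0;
}
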
+diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
+index 8596aba34f96..237d0cda1bcb 100644
+--- a/drivers/net/wireless/ath/carl9170/carl9170.h
++++ b/drivers/net/wireless/ath/carl9170/carl9170.h
+@@ -256,6 +256,7 @@ struct ar9170 {
+ 	atomic_t rx_work_urbs;
+ 	atomic_t rx_pool_urbs;
+ 	kernel_ulong_t features;
++	bool usb_ep_cmd_is_bulk;
+ 
+ 	/* firmware settings */
+ 	struct completion fw_load_wait;
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index 307bc0ddff99..83d20c8b2ad7 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
+ 		goto err_free;
+ 	}
+ 
+-	usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
+-		AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
+-		carl9170_usb_cmd_complete, ar, 1);
++	if (ar->usb_ep_cmd_is_bulk)
++		usb_fill_bulk_urb(urb, ar->udev,
++				  usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
++				  cmd, cmd->hdr.len + 4,
++				  carl9170_usb_cmd_complete, ar);
++	else
++		usb_fill_int_urb(urb, ar->udev,
++				 usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
++				 cmd, cmd->hdr.len + 4,
++				 carl9170_usb_cmd_complete, ar, 1);
+ 
+ 	if (free_buf)
+ 		urb->transfer_flags |= URB_FREE_BUFFER;
+@@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
+ static int carl9170_usb_probe(struct usb_interface *intf,
+ 			      const struct usb_device_id *id)
+ {
++	struct usb_endpoint_descriptor *ep;
+ 	struct ar9170 *ar;
+ 	struct usb_device *udev;
+-	int err;
++	int i, err;
+ 
+ 	err = usb_reset_device(interface_to_usbdev(intf));
+ 	if (err)
+@@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ 	ar->intf = intf;
+ 	ar->features = id->driver_info;
+ 
++	/* We need to remember the type of endpoint 4 because it differs
++	 * between high- and full-speed configuration. The high-speed
++	 * configuration specifies it as interrupt and the full-speed
++	 * configuration as bulk endpoint. This information is required
++	 * later when sending urbs to that endpoint.
++	 */
++	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
++		ep = &intf->cur_altsetting->endpoint[i].desc;
++
++		if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
++		    usb_endpoint_dir_out(ep) &&
++		    usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
++			ar->usb_ep_cmd_is_bulk = true;
++	}
++
+ 	usb_set_intfdata(intf, ar);
+ 	SET_IEEE80211_DEV(ar->hw, &intf->dev);
+ 
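
The endpoint walk above generalizes: a device may expose the same endpoint number with different transfer types depending on the speed configuration, and the driver must pick the matching URB initializer. Here is a descriptor-walk sketch over fabricated endpoint tables; the constants mirror the USB spec's bEndpointAddress/bmAttributes layout, while the struct and the table contents are assumptions for the demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USB_EP_NUM_MASK		0x0f
#define USB_XFER_TYPE_MASK	0x03
#define USB_XFER_BULK		0x02
#define USB_XFER_INT		0x03
#define AR9170_USB_EP_CMD	0x04

struct ep_desc {
	uint8_t bEndpointAddress;	/* bit 7 = direction (0 = OUT) */
	uint8_t bmAttributes;		/* low two bits = transfer type */
};

static bool cmd_ep_is_bulk(const struct ep_desc *eps, int n)
{
	for (int i = 0; i < n; i++) {
		bool out = !(eps[i].bEndpointAddress & 0x80);
		uint8_t num = eps[i].bEndpointAddress & USB_EP_NUM_MASK;
		uint8_t type = eps[i].bmAttributes & USB_XFER_TYPE_MASK;

		if (num == AR9170_USB_EP_CMD && out &&
		    type == USB_XFER_BULK)
			return true;
	}
	return false;
}

int main(void)
{
	/* full-speed configuration: EP4 OUT is bulk */
	struct ep_desc fs[] = {
		{ 0x81, USB_XFER_BULK },	/* EP1 IN, bulk */
		{ 0x04, USB_XFER_BULK },	/* EP4 OUT, bulk */
	};
	/* high-speed configuration: EP4 OUT is interrupt */
	struct ep_desc hs[] = {
		{ 0x81, USB_XFER_BULK },
		{ 0x04, USB_XFER_INT },		/* EP4 OUT, interrupt */
	};

	printf("full speed: cmd ep bulk? %d\n", cmd_ep_is_bulk(fs, 2));
	printf("high speed: cmd ep bulk? %d\n", cmd_ep_is_bulk(hs, 2));
	return 0;
}
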
+diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
+index 7917bb2fa834..406e50ef5152 100644
+--- a/drivers/regulator/arizona-ldo1.c
++++ b/drivers/regulator/arizona-ldo1.c
+@@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = {
+ 	.map_voltage = regulator_map_voltage_linear,
+ 	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+ 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+-	.get_bypass = regulator_get_bypass_regmap,
+-	.set_bypass = regulator_set_bypass_regmap,
+ };
+ 
+ static const struct regulator_desc arizona_ldo1 = {
+diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
+index 90814fe85ac1..d5b3f66f0ebd 100644
+--- a/drivers/scsi/bfa/bfa_ioc.h
++++ b/drivers/scsi/bfa/bfa_ioc.h
+@@ -72,7 +72,7 @@ struct bfa_sge_s {
+ } while (0)
+ 
+ #define bfa_swap_words(_x)  (	\
+-	((_x) << 32) | ((_x) >> 32))
++	((u64)(_x) << 32) | ((u64)(_x) >> 32))
+ 
+ #ifdef __BIG_ENDIAN
+ #define bfa_sge_to_be(_x)
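
The bfa_swap_words() fix deserves a note: without the u64 casts, passing a 32-bit expression would shift it by its own width — undefined behaviour in C — and the arithmetic would be done in 32 bits, losing the upper half. A standalone check of the corrected macro, swapping the two 32-bit halves of a 64-bit word:

#include <assert.h>
#include <stdint.h>

#define bfa_swap_words(_x)  (((uint64_t)(_x) << 32) | ((uint64_t)(_x) >> 32))

int main(void)
{
	uint64_t v = 0x1122334455667788ULL;

	assert(bfa_swap_words(v) == 0x5566778811223344ULL);
	return 0;
}
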
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index f969aca0b54e..49014a143c6a 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -222,6 +222,7 @@ static struct {
+ 	{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
++	{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ 	{"Promise", "", NULL, BLIST_SPARSELUN},
+ 	{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ 	{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 4109530e92a0..054ec2c412a4 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -922,6 +922,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 	if (*bflags & BLIST_USE_10_BYTE_MS)
+ 		sdev->use_10_for_ms = 1;
+ 
++	/* some devices don't like REPORT SUPPORTED OPERATION CODES
++	 * and will simply time out, causing sd_mod init to take a very
++	 * very long time */
++	if (*bflags & BLIST_NO_RSOC)
++		sdev->no_report_opcodes = 1;
++
+ 	/* set the device running here so that slave configure
+ 	 * may do I/O */
+ 	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
+@@ -950,7 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 
+ 	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
+ 
+-	if (*bflags & BLIST_SKIP_VPD_PAGES)
++	if (*bflags & BLIST_TRY_VPD_PAGES)
++		sdev->try_vpd_pages = 1;
++	else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ 		sdev->skip_vpd_pages = 1;
+ 
+ 	transport_configure_device(&sdev->sdev_gendev);
+@@ -1236,6 +1244,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
+ 		max_dev_lun = min(8U, max_dev_lun);
+ 
+ 	/*
++	 * Stop scanning at 255 unless BLIST_SCSI3LUN
++	 */
++	if (!(bflags & BLIST_SCSI3LUN))
++		max_dev_lun = min(256U, max_dev_lun);
++
++	/*
+ 	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
+ 	 * until we reach the max, or no LUN is found and we are not
+ 	 * sparse_lun.
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index dbc024bd4adf..69d2a7060fde 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2667,6 +2667,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+ 
+ static int sd_try_extended_inquiry(struct scsi_device *sdp)
+ {
++	/* Attempt VPD inquiry if the device blacklist explicitly calls
++	 * for it.
++	 */
++	if (sdp->try_vpd_pages)
++		return 1;
+ 	/*
+ 	 * Although VPD inquiries can go to SCSI-2 type devices,
+ 	 * some USB ones crash on receiving them, and the pages
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 9969fa1ef7c4..ed0f899e8aa5 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -33,6 +33,7 @@
+ #include <linux/device.h>
+ #include <linux/hyperv.h>
+ #include <linux/mempool.h>
++#include <linux/blkdev.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_host.h>
+@@ -330,17 +331,17 @@ static int storvsc_timeout = 180;
+ 
+ static void storvsc_on_channel_callback(void *context);
+ 
+-/*
+- * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
+- * reality, the path/target is not used (ie always set to 0) so our
+- * scsi host adapter essentially has 1 bus with 1 target that contains
+- * up to 256 luns.
+- */
+-#define STORVSC_MAX_LUNS_PER_TARGET			64
+-#define STORVSC_MAX_TARGETS				1
+-#define STORVSC_MAX_CHANNELS				1
++#define STORVSC_MAX_LUNS_PER_TARGET			255
++#define STORVSC_MAX_TARGETS				2
++#define STORVSC_MAX_CHANNELS				8
+ 
++#define STORVSC_FC_MAX_LUNS_PER_TARGET			255
++#define STORVSC_FC_MAX_TARGETS				128
++#define STORVSC_FC_MAX_CHANNELS				8
+ 
++#define STORVSC_IDE_MAX_LUNS_PER_TARGET			64
++#define STORVSC_IDE_MAX_TARGETS				1
++#define STORVSC_IDE_MAX_CHANNELS			1
+ 
+ struct storvsc_cmd_request {
+ 	struct list_head entry;
+@@ -1017,6 +1018,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 		case ATA_12:
+ 			set_host_byte(scmnd, DID_PASSTHROUGH);
+ 			break;
++		 * On some Windows hosts the TEST_UNIT_READY command can return
++		 * On Some Windows hosts TEST_UNIT_READY command can return
++		 * SRB_STATUS_ERROR, let the upper level code deal with it
++		 * based on the sense information.
++		 */
++		case TEST_UNIT_READY:
++			break;
+ 		default:
+ 			set_host_byte(scmnd, DID_TARGET_FAILURE);
+ 		}
+@@ -1518,6 +1526,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ 	return SUCCESS;
+ }
+ 
++/*
++ * The host guarantees to respond to each command, although I/O latencies might
++ * be unbounded on Azure.  Reset the timer unconditionally to give the host a
++ * chance to perform EH.
++ */
++static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
++{
++	return BLK_EH_RESET_TIMER;
++}
++
+ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
+ {
+ 	bool allowed = true;
+@@ -1553,9 +1571,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	struct vmscsi_request *vm_srb;
+ 	struct stor_mem_pools *memp = scmnd->device->hostdata;
+ 
+-	if (!storvsc_scsi_cmd_ok(scmnd)) {
+-		scmnd->scsi_done(scmnd);
+-		return 0;
++	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
++		/*
++		 * On legacy hosts filter unimplemented commands.
++		 * Future hosts are expected to correctly handle
++		 * unsupported commands. Furthermore, it is
++		 * possible that some of the currently
++		 * unsupported commands may be supported in
++		 * future versions of the host.
++		 */
++		if (!storvsc_scsi_cmd_ok(scmnd)) {
++			scmnd->scsi_done(scmnd);
++			return 0;
++		}
+ 	}
+ 
+ 	request_size = sizeof(struct storvsc_cmd_request);
+@@ -1580,26 +1608,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	vm_srb = &cmd_request->vstor_packet.vm_srb;
+ 	vm_srb->win8_extension.time_out_value = 60;
+ 
++	vm_srb->win8_extension.srb_flags |=
++		(SRB_FLAGS_QUEUE_ACTION_ENABLE |
++		SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ 
+ 	/* Build the SRB */
+ 	switch (scmnd->sc_data_direction) {
+ 	case DMA_TO_DEVICE:
+ 		vm_srb->data_in = WRITE_TYPE;
+ 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+-		vm_srb->win8_extension.srb_flags |=
+-			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ 		break;
+ 	case DMA_FROM_DEVICE:
+ 		vm_srb->data_in = READ_TYPE;
+ 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+-		vm_srb->win8_extension.srb_flags |=
+-			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+-			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
+ 		break;
+ 	default:
+ 		vm_srb->data_in = UNKNOWN_TYPE;
+-		vm_srb->win8_extension.srb_flags = 0;
++		vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
++						     SRB_FLAGS_DATA_OUT);
+ 		break;
+ 	}
+ 
+@@ -1687,11 +1713,11 @@ static struct scsi_host_template scsi_driver = {
+ 	.bios_param =		storvsc_get_chs,
+ 	.queuecommand =		storvsc_queuecommand,
+ 	.eh_host_reset_handler =	storvsc_host_reset_handler,
++	.eh_timed_out =		storvsc_eh_timed_out,
+ 	.slave_alloc =		storvsc_device_alloc,
+ 	.slave_destroy =	storvsc_device_destroy,
+ 	.slave_configure =	storvsc_device_configure,
+-	.cmd_per_lun =		1,
+-	/* 64 max_queue * 1 target */
++	.cmd_per_lun =		255,
+ 	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
+ 	.this_id =		-1,
+ 	/* no use setting to 0 since ll_blk_rw reset it to 1 */
+@@ -1743,19 +1769,25 @@ static int storvsc_probe(struct hv_device *device,
+ 	 * set state to properly communicate with the host.
+ 	 */
+ 
+-	if (vmbus_proto_version == VERSION_WIN8) {
+-		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
+-		vmscsi_size_delta = 0;
+-		vmstor_current_major = VMSTOR_WIN8_MAJOR;
+-		vmstor_current_minor = VMSTOR_WIN8_MINOR;
+-	} else {
++	switch (vmbus_proto_version) {
++	case VERSION_WS2008:
++	case VERSION_WIN7:
+ 		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
+ 		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+ 		vmstor_current_major = VMSTOR_WIN7_MAJOR;
+ 		vmstor_current_minor = VMSTOR_WIN7_MINOR;
++		break;
++	default:
++		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
++		vmscsi_size_delta = 0;
++		vmstor_current_major = VMSTOR_WIN8_MAJOR;
++		vmstor_current_minor = VMSTOR_WIN8_MINOR;
++		break;
+ 	}
+ 
+-
++	if (dev_id->driver_data == SFC_GUID)
++		scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
++					 STORVSC_FC_MAX_TARGETS);
+ 	host = scsi_host_alloc(&scsi_driver,
+ 			       sizeof(struct hv_host_device));
+ 	if (!host)
+@@ -1789,12 +1821,25 @@ static int storvsc_probe(struct hv_device *device,
+ 	host_dev->path = stor_device->path_id;
+ 	host_dev->target = stor_device->target_id;
+ 
+-	/* max # of devices per target */
+-	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+-	/* max # of targets per channel */
+-	host->max_id = STORVSC_MAX_TARGETS;
+-	/* max # of channels */
+-	host->max_channel = STORVSC_MAX_CHANNELS - 1;
++	switch (dev_id->driver_data) {
++	case SFC_GUID:
++		host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
++		host->max_id = STORVSC_FC_MAX_TARGETS;
++		host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
++		break;
++
++	case SCSI_GUID:
++		host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
++		host->max_id = STORVSC_MAX_TARGETS;
++		host->max_channel = STORVSC_MAX_CHANNELS - 1;
++		break;
++
++	default:
++		host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
++		host->max_id = STORVSC_IDE_MAX_TARGETS;
++		host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
++		break;
++	}
+ 	/* max cmd length */
+ 	host->max_cmd_len = STORVSC_MAX_CMD_LEN;
+ 
+diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
+index 1d1d321d90c4..72006e63d513 100644
+--- a/drivers/spi/spi-orion.c
++++ b/drivers/spi/spi-orion.c
+@@ -404,8 +404,6 @@ static int orion_spi_probe(struct platform_device *pdev)
+ 	struct resource *r;
+ 	unsigned long tclk_hz;
+ 	int status = 0;
+-	const u32 *iprop;
+-	int size;
+ 
+ 	master = spi_alloc_master(&pdev->dev, sizeof *spi);
+ 	if (master == NULL) {
+@@ -416,10 +414,10 @@ static int orion_spi_probe(struct platform_device *pdev)
+ 	if (pdev->id != -1)
+ 		master->bus_num = pdev->id;
+ 	if (pdev->dev.of_node) {
+-		iprop = of_get_property(pdev->dev.of_node, "cell-index",
+-					&size);
+-		if (iprop && size == sizeof(*iprop))
+-			master->bus_num = *iprop;
++		u32 cell_index;
++		if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
++					  &cell_index))
++			master->bus_num = cell_index;
+ 	}
+ 
+ 	/* we support only mode 0, and no options */
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index 7b69e93d8448..fa28c75c6d04 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1082,6 +1082,7 @@ static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ 	{ "INT3430", 0 },
+ 	{ "INT3431", 0 },
+ 	{ "80860F0E", 0 },
++	{ "8086228E", 0 },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 85f692ddd992..d1eea2d426bd 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -53,9 +53,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ 	/*=== Customer ID ===*/
+ 	/****** 8188EUS ********/
++	{USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
+ 	{USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
+ 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
++	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ 	{}	/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 557e8a9fe58a..721de375c543 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1704,8 +1704,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	 * - Change autosuspend delay of hub can avoid unnecessary auto
+ 	 *   suspend timer for hub, also may decrease power consumption
+ 	 *   of USB bus.
++	 *
++	 * - If user has indicated to prevent autosuspend by passing
++	 *   usbcore.autosuspend = -1 then keep autosuspend disabled.
+ 	 */
+-	pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#ifdef CONFIG_PM_RUNTIME
++	if (hdev->dev.power.autosuspend_delay >= 0)
++		pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
++#endif
+ 
+ 	/*
+ 	 * Hubs have proper suspend/resume support, except for root hubs
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 1bb85bee2625..7ba861543d03 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -1241,7 +1241,7 @@ static int ehci_hub_control (
+ 			if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+ 				spin_unlock_irqrestore(&ehci->lock, flags);
+ 				retval = ehset_single_step_set_feature(hcd,
+-									wIndex);
++								wIndex + 1);
+ 				spin_lock_irqsave(&ehci->lock, flags);
+ 				break;
+ 			}
+diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
+index cc9dd9e4f05e..45f87735b449 100644
+--- a/drivers/usb/host/ohci-spear.c
++++ b/drivers/usb/host/ohci-spear.c
+@@ -53,7 +53,7 @@ static int ohci_spear_start(struct usb_hcd *hcd)
+ 	create_debug_files(ohci);
+ 
+ #ifdef DEBUG
+-	ohci_dump(ohci, 1);
++	ohci_dump(ohci);
+ #endif
+ 	return 0;
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index f34b42e4c391..1cfe0c743092 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -101,6 +101,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	/* AMD PLL quirk */
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ 		xhci->quirks |= XHCI_AMD_PLL_FIX;
++
++	if (pdev->vendor == PCI_VENDOR_ID_AMD)
++		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ 		xhci->quirks |= XHCI_LPM_SUPPORT;
+ 		xhci->quirks |= XHCI_INTEL_HOST;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6118e292d5df..46ad9f3f589d 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2579,7 +2579,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		 * last TRB of the previous TD. The command completion handle
+ 		 * will take care the rest.
+ 		 */
+-		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
++		if (!event_seg && (trb_comp_code == COMP_STOP ||
++				   trb_comp_code == COMP_STOP_INVAL)) {
+ 			ret = 0;
+ 			goto cleanup;
+ 		}
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index bac979402ce3..bb68ed5cd3bc 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -152,6 +152,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
+@@ -948,6 +949,8 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
+ 	{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
++	/* ekey Devices */
++	{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ 	/* Infineon Devices */
+ 	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ 	{ }					/* Terminating entry */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 1e58d90a0b6c..70b0b1d88ae9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -42,6 +42,8 @@
+ /* www.candapter.com Ewert Energy Systems CANdapter device */
+ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+ 
++#define FTDI_BM_ATOM_NANO_PID	0xa559	/* Basic Micro ATOM Nano USB2Serial */
++
+ /*
+  * Texas Instruments XDS100v2 JTAG / BeagleBone A3
+  * http://processors.wiki.ti.com/index.php/XDS100
+@@ -1378,3 +1380,8 @@
+ #define BRAINBOXES_US_160_6_PID		0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
+ #define BRAINBOXES_US_160_7_PID		0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
+ #define BRAINBOXES_US_160_8_PID		0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
++
++/*
++ * ekey biometric systems GmbH (http://ekey.net/)
++ */
++#define FTDI_EKEY_CONV_USB_PID		0xCB08	/* Converter USB */
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 36a7740e827c..cc5a430dc357 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -521,6 +521,10 @@ static void command_port_read_callback(struct urb *urb)
+ 		dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
+ 		return;
+ 	}
++	if (!urb->actual_length) {
++		dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
++		return;
++	}
+ 	if (status) {
+ 		dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
+ 		if (status != -ENOENT)
+@@ -541,7 +545,8 @@ static void command_port_read_callback(struct urb *urb)
+ 		/* These are unsolicited reports from the firmware, hence no
+ 		   waiting command to wakeup */
+ 		dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
+-	} else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
++	} else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
++		(urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
+ 		memcpy(command_info->result_buffer, &data[1],
+ 						urb->actual_length - 1);
+ 		command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
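
Both whiteheat hunks are instances of one rule: a completion handler must treat urb->actual_length as untrusted device input, checking that data is present and that any derived copy length fits the destination before memcpy(). The rule in isolation, with a stand-in response handler and a fixed result buffer (the names and sizes are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

#define RESULT_BUF 64

static char result_buffer[RESULT_BUF];

static int handle_response(const unsigned char *data, size_t actual_length)
{
	if (!actual_length) {
		fprintf(stderr, "empty response, ignoring\n");
		return -1;
	}
	/* first byte is the opcode; the payload must fit the buffer */
	if (actual_length - 1 > sizeof(result_buffer)) {
		fprintf(stderr, "oversized response, ignoring\n");
		return -1;
	}
	memcpy(result_buffer, data + 1, actual_length - 1);
	return 0;
}

int main(void)
{
	unsigned char ok[] = { 0x0a, 'h', 'i' };
	unsigned char huge[200] = { 0x0a };

	printf("ok: %d\n", handle_response(ok, sizeof(ok)));
	printf("huge: %d\n", handle_response(huge, sizeof(huge)));
	printf("empty: %d\n", handle_response(ok, 0));
	return 0;
}
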
+diff --git a/fs/aio.c b/fs/aio.c
+index 6d68e01dc7ca..b732a9c32042 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1065,6 +1065,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
+ 	tail = ring->tail;
+ 	kunmap_atomic(ring);
+ 
++	/*
++	 * Ensure that once we've read the current tail pointer, that
++	 * we also see the events that were stored up to the tail.
++	 */
++	smp_rmb();
++
+ 	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
+ 
+ 	if (head == tail)
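
The smp_rmb() added to aio_read_events_ring() pairs with the producer's barrier before it publishes a new tail: the consumer must not read event slots until its tail read is ordered before those reads. In C11 terms that is an acquire load matching a release store; a two-thread sketch follows, where the ring size, event values and thread shape are fabricated for the demo:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING 16

static int events[RING];
static atomic_uint tail;

static void *producer(void *arg)
{
	(void)arg;
	for (unsigned i = 0; i < 8; i++) {
		events[i] = 100 + i;		/* store the event... */
		/* ...then publish: the release fence makes the store
		 * above visible before the new tail is. */
		atomic_store_explicit(&tail, i + 1,
				      memory_order_release);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned seen = 0;

	pthread_create(&t, NULL, producer, NULL);
	while (seen < 8) {
		/* acquire: like reading ring->tail then smp_rmb() */
		unsigned cur = atomic_load_explicit(&tail,
						    memory_order_acquire);
		while (seen < cur) {
			printf("event %d\n", events[seen]);
			seen++;
		}
	}
	pthread_join(t, NULL);
	return 0;
}
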
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 1f4ce7ac144d..53039de1495d 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -263,9 +263,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ 			}
+ 			if (ret > 0)
+ 				goto next;
+-			ret = ulist_add_merge(parents, eb->start,
+-					      (uintptr_t)eie,
+-					      (u64 *)&old, GFP_NOFS);
++			ret = ulist_add_merge_ptr(parents, eb->start,
++						  eie, (void **)&old, GFP_NOFS);
+ 			if (ret < 0)
+ 				break;
+ 			if (!ret && extent_item_pos) {
+@@ -955,16 +954,19 @@ again:
+ 					ret = -EIO;
+ 					goto out;
+ 				}
++				btrfs_tree_read_lock(eb);
++				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ 				ret = find_extent_in_eb(eb, bytenr,
+ 							*extent_item_pos, &eie);
++				btrfs_tree_read_unlock_blocking(eb);
+ 				free_extent_buffer(eb);
+ 				if (ret < 0)
+ 					goto out;
+ 				ref->inode_list = eie;
+ 			}
+-			ret = ulist_add_merge(refs, ref->parent,
+-					      (uintptr_t)ref->inode_list,
+-					      (u64 *)&eie, GFP_NOFS);
++			ret = ulist_add_merge_ptr(refs, ref->parent,
++						  ref->inode_list,
++						  (void **)&eie, GFP_NOFS);
+ 			if (ret < 0)
+ 				goto out;
+ 			if (!ret && extent_item_pos) {
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index b395791dd923..594bbfd4996e 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2485,6 +2485,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
+ 					test_bit(BIO_UPTODATE, &bio->bi_flags);
+ 				if (err)
+ 					uptodate = 0;
++				offset += len;
+ 				continue;
+ 			}
+ 		}
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 4f53159bdb9d..d4731e9808ea 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -752,7 +752,7 @@ again:
+ 				found_next = 1;
+ 			if (ret != 0)
+ 				goto insert;
+-			slot = 0;
++			slot = path->slots[0];
+ 		}
+ 		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+ 		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fa8010c1b628..7e6758d075ad 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -683,6 +683,18 @@ retry:
+ 				unlock_extent(io_tree, async_extent->start,
+ 					      async_extent->start +
+ 					      async_extent->ram_size - 1);
++
++				/*
++				 * we need to redirty the pages if we decide to
++				 * fall back to uncompressed IO, otherwise we
++				 * will not submit these pages down to lower
++				 * layers.
++				 */
++				extent_range_redirty_for_io(inode,
++						async_extent->start,
++						async_extent->start +
++						async_extent->ram_size - 1);
++
+ 				goto retry;
+ 			}
+ 			goto out_free;
+diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
+index fb36731074b5..3e62b57be6b5 100644
+--- a/fs/btrfs/ulist.h
++++ b/fs/btrfs/ulist.h
+@@ -74,6 +74,21 @@ void ulist_free(struct ulist *ulist);
+ int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
+ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
+ 		    u64 *old_aux, gfp_t gfp_mask);
++
++/* just like ulist_add_merge() but takes a pointer for the aux data */
++static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
++				      void **old_aux, gfp_t gfp_mask)
++{
++#if BITS_PER_LONG == 32
++	u64 old64 = (uintptr_t)*old_aux;
++	int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
++	*old_aux = (void *)((uintptr_t)old64);
++	return ret;
++#else
++	return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
++#endif
++}
++
+ struct ulist_node *ulist_next(struct ulist *ulist,
+ 			      struct ulist_iterator *uiter);
+ 
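
ulist_add_merge_ptr() exists because on 32-bit kernels a pointer is half the width of the u64 aux slot, so casting "(u64 *)&ptr" reads and writes past the 4-byte pointer object. The safe round trip goes through uintptr_t in both directions, as this tiny standalone check shows:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int value = 7;
	void *p = &value;

	/* pack: pointer -> uintptr_t -> u64 (zero-extends on 32-bit) */
	uint64_t slot = (uintptr_t)p;

	/* unpack: u64 -> uintptr_t -> pointer */
	void *q = (void *)(uintptr_t)slot;

	assert(q == p);
	printf("*q = %d\n", *(int *)q);
	return 0;
}
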
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 465b65488b27..d13f77ea0034 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -70,11 +70,6 @@
+ #define SERVER_NAME_LENGTH 40
+ #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
+ 
+-/* used to define string lengths for reversing unicode strings */
+-/*         (256+1)*2 = 514                                     */
+-/*           (max path length + 1 for null) * 2 for unicode    */
+-#define MAX_NAME 514
+-
+ /* SMB echo "timeout" -- FIXME: tunable? */
+ #define SMB_ECHO_INTERVAL (60 * HZ)
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 643a18491bed..892a1e947b5a 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2847,7 +2847,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
+ 		total_read += result;
+ 	}
+ 
+-	return total_read > 0 ? total_read : result;
++	return total_read > 0 && result != -EAGAIN ? total_read : result;
+ }
+ 
+ static ssize_t
+@@ -3270,7 +3270,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
+ 		total_read += result;
+ 	}
+ 
+-	return total_read > 0 ? total_read : result;
++	return total_read > 0 && result != -EAGAIN ? total_read : result;
+ }
+ 
+ static int cifs_readpages(struct file *file, struct address_space *mapping,
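
The cifs change keeps a trailing -EAGAIN from being swallowed by a positive partial count: the caller must see the error so the request can be retried, rather than treating the short read as complete. A toy model of the return-value choice (hypothetical finish() helper, not CIFS code):

#include <errno.h>
#include <stdio.h>

static long finish(long total_read, long last_result)
{
	/* old: total_read > 0 ? total_read : last_result
	 * new: also fall through to the error when it is -EAGAIN */
	return total_read > 0 && last_result != -EAGAIN
		? total_read : last_result;
}

int main(void)
{
	printf("%ld\n", finish(4096, -EAGAIN));	/* -11: caller retries */
	printf("%ld\n", finish(4096, 4096));	/* 4096: done */
	return 0;
}
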
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 5f8bdff3a758..2a93255c0150 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1682,13 +1682,22 @@ cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
+ unlink_target:
+ 	/* Try unlinking the target dentry if it's not negative */
+ 	if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
+-		tmprc = cifs_unlink(target_dir, target_dentry);
++		if (S_ISDIR(target_dentry->d_inode->i_mode))
++			tmprc = cifs_rmdir(target_dir, target_dentry);
++		else
++			tmprc = cifs_unlink(target_dir, target_dentry);
+ 		if (tmprc)
+ 			goto cifs_rename_exit;
+ 		rc = cifs_do_rename(xid, source_dentry, from_name,
+ 				    target_dentry, to_name);
+ 	}
+ 
++	/* force revalidate to go get info when needed */
++	CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
++
++	source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
++		target_dir->i_mtime = current_fs_time(source_dir->i_sb);
++
+ cifs_rename_exit:
+ 	kfree(info_buf_source);
+ 	kfree(from_name);
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 5940ecabbe6a..59edb8fd33aa 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ 		if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
+ 			cfile->invalidHandle = true;
+ 			spin_unlock(&cifs_file_list_lock);
+-			if (server->ops->close)
+-				server->ops->close(xid, tcon, &cfile->fid);
++			if (server->ops->close_dir)
++				server->ops->close_dir(xid, tcon, &cfile->fid);
+ 		} else
+ 			spin_unlock(&cifs_file_list_lock);
+ 		if (cfile->srch_inf.ntwrk_buf_start) {
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 3f17b4550831..45992944e238 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
+ 		goto out;
+ 	}
+ 
+-	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			    GFP_KERNEL);
+ 	if (smb2_data == NULL) {
+ 		rc = -ENOMEM;
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 84c012a6aba0..215f8d3e3e53 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	*adjust_tz = false;
+ 	*symlink = false;
+ 
+-	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			    GFP_KERNEL);
+ 	if (smb2_data == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index 7c2f45c06fc2..824696fb24db 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ 	{STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
+ 	{STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
+ 	{STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
+-	{STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
++	{STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"},
+ 	{STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
+ 	{STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
+ 	{STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 4ac88f89a5e5..8956cf67299b 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -251,7 +251,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 	int rc;
+ 	struct smb2_file_all_info *smb2_data;
+ 
+-	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++	smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			    GFP_KERNEL);
+ 	if (smb2_data == NULL)
+ 		return -ENOMEM;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 829ad35f98d4..fb0c67372a90 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -912,7 +912,8 @@ tcon_exit:
+ tcon_error_exit:
+ 	if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ 		cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
+-		tcon->bad_network_name = true;
++		if (tcon)
++			tcon->bad_network_name = true;
+ 	}
+ 	goto tcon_exit;
+ }
+@@ -1488,7 +1489,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ {
+ 	return query_info(xid, tcon, persistent_fid, volatile_fid,
+ 			  FILE_ALL_INFORMATION,
+-			  sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
++			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ 			  sizeof(struct smb2_file_all_info), data);
+ }
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 8ef74f3d8fe5..87b70fe7eccc 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -125,8 +125,6 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
+  * This hash-function tries to avoid losing too many bits of hash
+  * information, yet avoid using a prime hash-size or similar.
+  */
+-#define D_HASHBITS     d_hash_shift
+-#define D_HASHMASK     d_hash_mask
+ 
+ static unsigned int d_hash_mask __read_mostly;
+ static unsigned int d_hash_shift __read_mostly;
+@@ -137,8 +135,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
+ 					unsigned int hash)
+ {
+ 	hash += (unsigned long) parent / L1_CACHE_BYTES;
+-	hash = hash + (hash >> D_HASHBITS);
+-	return dentry_hashtable + (hash & D_HASHMASK);
++	return dentry_hashtable + hash_32(hash, d_hash_shift);
+ }
+ 
+ /* Statistics gathering. */
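
The dcache change swaps a hand-rolled add-and-shift fold for hash_32(), which mixes every input bit with a multiplicative hash before taking the top bits, so high bits of the input influence the bucket choice. Roughly, and treating the constant and shift as a version-dependent sketch rather than the exact kernel implementation (see include/linux/hash.h):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001u

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* The multiply mixes every input bit into the high bits... */
	uint32_t hash = val * GOLDEN_RATIO_PRIME_32;

	/* ...and the shift keeps the top `bits` of them. */
	return hash >> (32 - bits);
}

int main(void)
{
	/* Two inputs differing only in a high bit now land far apart. */
	printf("%u %u\n", hash_32(0x80000001u, 10), hash_32(0x1u, 10));
	return 0;
}
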
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index e5d9908c0bc3..d65a6260ad61 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2192,6 +2192,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ 	struct ext4_map_blocks *map = &mpd->map;
+ 	int err;
+ 	loff_t disksize;
++	int progress = 0;
+ 
+ 	mpd->io_submit.io_end->offset =
+ 				((loff_t)map->m_lblk) << inode->i_blkbits;
+@@ -2208,8 +2209,11 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ 			 * is non-zero, a commit should free up blocks.
+ 			 */
+ 			if ((err == -ENOMEM) ||
+-			    (err == -ENOSPC && ext4_count_free_clusters(sb)))
++			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
++				if (progress)
++					goto update_disksize;
+ 				return err;
++			}
+ 			ext4_msg(sb, KERN_CRIT,
+ 				 "Delayed block allocation failed for "
+ 				 "inode %lu at logical offset %llu with"
+@@ -2226,15 +2230,17 @@ static int mpage_map_and_submit_extent(handle_t *handle,
+ 			*give_up_on_write = true;
+ 			return err;
+ 		}
++		progress = 1;
+ 		/*
+ 		 * Update buffer state, submit mapped pages, and get us new
+ 		 * extent to map
+ 		 */
+ 		err = mpage_map_and_submit_buffers(mpd);
+ 		if (err < 0)
+-			return err;
++			goto update_disksize;
+ 	} while (map->m_len);
+ 
++update_disksize:
+ 	/*
+ 	 * Update on-disk size after IO is submitted.  Races with
+ 	 * truncate are avoided by checking i_size under i_data_sem.
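
The ext4 hunk above introduces a `progress` flag so that an allocation failure after some extents were already mapped and submitted still updates the on-disk size for the part that made it out, instead of returning and discarding that bookkeeping. A self-contained sketch of the same shape, hypothetical names throughout:

#include <stdio.h>

struct ctx { int remaining; int submitted; int disksize; int fail_at; };

static int map_next_extent(struct ctx *c)
{
	if (c->submitted == c->fail_at)
		return -28;			/* simulate ENOSPC */
	return 0;
}

static int submit_mapped(struct ctx *c)
{
	c->submitted++;
	c->remaining--;
	return 0;
}

static int map_and_submit(struct ctx *c)
{
	int progress = 0;
	int err = 0;

	do {
		err = map_next_extent(c);
		if (err < 0) {
			if (!progress)
				return err;	/* nothing to preserve */
			goto update_disksize;
		}
		progress = 1;
		err = submit_mapped(c);
		if (err < 0)
			goto update_disksize;
	} while (c->remaining > 0);

update_disksize:
	c->disksize = c->submitted;	/* stand-in for the real update */
	return err;
}

int main(void)
{
	struct ctx c = { .remaining = 4, .fail_at = 2 };

	printf("err=%d disksize=%d\n", map_and_submit(&c), c.disksize);
	return 0;
}
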
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 795d5afc1479..242226a87be7 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1398,6 +1398,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
+ 	int last = first + count - 1;
+ 	struct super_block *sb = e4b->bd_sb;
+ 
++	if (WARN_ON(count == 0))
++		return;
+ 	BUG_ON(last >= (sb->s_blocksize << 3));
+ 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ 	/* Don't bother if the block group is corrupt. */
+@@ -3200,6 +3202,8 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ 	int err;
+ 
+ 	if (pa == NULL) {
++		if (ac->ac_f_ex.fe_len == 0)
++			return;
+ 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+ 		if (err) {
+ 			/*
+@@ -3214,6 +3218,7 @@ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+ 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+ 			       ac->ac_f_ex.fe_len);
+ 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
++		ext4_mb_unload_buddy(&e4b);
+ 		return;
+ 	}
+ 	if (pa->pa_type == MB_INODE_PA)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9afc4ba21611..b52a34bc7600 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3141,9 +3141,9 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ 
+ 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ 				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
+-		/* journal checksum v2 */
++		/* journal checksum v3 */
+ 		compat = 0;
+-		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
++		incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
+ 	} else {
+ 		/* journal checksum v1 */
+ 		compat = JBD2_FEATURE_COMPAT_CHECKSUM;
+@@ -3165,6 +3165,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ 		jbd2_journal_clear_features(sbi->s_journal,
+ 				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
+ 				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
++				JBD2_FEATURE_INCOMPAT_CSUM_V3 |
+ 				JBD2_FEATURE_INCOMPAT_CSUM_V2);
+ 	}
+ 
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index cf2fc0594063..9181c2b22b3c 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
+ 	struct commit_header *h;
+ 	__u32 csum;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	h = (struct commit_header *)(bh->b_data);
+@@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
+ 	return checksum;
+ }
+ 
+-static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
++static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
+ 				   unsigned long long block)
+ {
+ 	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
+-	if (tag_bytes > JBD2_TAG_SIZE32)
++	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT))
+ 		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
+ }
+ 
+@@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j,
+ 	struct jbd2_journal_block_tail *tail;
+ 	__u32 csum;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
+@@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j,
+ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+ 				    struct buffer_head *bh, __u32 sequence)
+ {
++	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+ 	struct page *page = bh->b_page;
+ 	__u8 *addr;
+ 	__u32 csum32;
+ 	__be32 seq;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	seq = cpu_to_be32(sequence);
+@@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
+ 			     bh->b_size);
+ 	kunmap_atomic(addr);
+ 
+-	/* We only have space to store the lower 16 bits of the crc32c. */
+-	tag->t_checksum = cpu_to_be16(csum32);
++	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		tag3->t_checksum = cpu_to_be32(csum32);
++	else
++		tag->t_checksum = cpu_to_be16(csum32);
+ }
+ /*
+  * jbd2_journal_commit_transaction
+@@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	LIST_HEAD(io_bufs);
+ 	LIST_HEAD(log_bufs);
+ 
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		csum_size = sizeof(struct jbd2_journal_block_tail);
+ 
+ 	/*
+@@ -692,7 +695,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 			tag_flag |= JBD2_FLAG_SAME_UUID;
+ 
+ 		tag = (journal_block_tag_t *) tagp;
+-		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
++		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
+ 		tag->t_flags = cpu_to_be16(tag_flag);
+ 		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
+ 					commit_transaction->t_tid);
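
The v2 tag format only has a __be16 checksum field, so cpu_to_be16() silently drops the high 16 bits of the crc32c; the v3 tag widens the field to a full 32 bits. A standalone demonstration of what the truncation loses (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t csum_a = 0xdeadbeef;
	uint32_t csum_b = 0x0000beef;	/* differs only above bit 15 */

	uint16_t tag_a = (uint16_t)csum_a;	/* what a v2 tag stores */
	uint16_t tag_b = (uint16_t)csum_b;

	printf("v2 tags: %04x vs %04x -> %s\n", tag_a, tag_b,
	       tag_a == tag_b ? "collision" : "distinct");
	printf("v3 tags: %08x vs %08x -> %s\n", csum_a, csum_b,
	       csum_a == csum_b ? "collision" : "distinct");
	return 0;
}
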
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 52032647dd4a..e72faacaf578 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug);
+ /* Checksumming functions */
+ int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
+ {
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
+@@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+ 
+ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+ {
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	return sb->s_checksum == jbd2_superblock_csum(j, sb);
+@@ -153,7 +153,7 @@ int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
+ 
+ void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
+ {
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	sb->s_checksum = jbd2_superblock_csum(j, sb);
+@@ -1524,21 +1524,29 @@ static int journal_get_superblock(journal_t *journal)
+ 		goto out;
+ 	}
+ 
+-	if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
+-	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++	if (jbd2_journal_has_csum_v2or3(journal) &&
++	    JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
+ 		/* Can't have checksum v1 and v2 on at the same time! */
+ 		printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+ 		       "at the same time!\n");
+ 		goto out;
+ 	}
+ 
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
++	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
++		/* Can't have checksum v2 and v3 at the same time! */
++		printk(KERN_ERR "JBD: Can't enable checksumming v2 and v3 "
++		       "at the same time!\n");
++		goto out;
++	}
++
+ 	if (!jbd2_verify_csum_type(journal, sb)) {
+ 		printk(KERN_ERR "JBD: Unknown checksum type\n");
+ 		goto out;
+ 	}
+ 
+ 	/* Load the checksum driver */
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++	if (jbd2_journal_has_csum_v2or3(journal)) {
+ 		journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+ 		if (IS_ERR(journal->j_chksum_driver)) {
+ 			printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+@@ -1555,7 +1563,7 @@ static int journal_get_superblock(journal_t *journal)
+ 	}
+ 
+ 	/* Precompute checksum seed for all metadata */
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
+ 						   sizeof(sb->s_uuid));
+ 
+@@ -1815,8 +1823,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 	if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
+ 		return 0;
+ 
+-	/* Asking for checksumming v2 and v1?  Only give them v2. */
+-	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
++	/* If enabling v2 checksums, turn on v3 instead */
++	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) {
++		incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2;
++		incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3;
++	}
++
++	/* Asking for checksumming v3 and v1?  Only give them v3. */
++	if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 &&
+ 	    compat & JBD2_FEATURE_COMPAT_CHECKSUM)
+ 		compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
+ 
+@@ -1825,8 +1839,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 
+ 	sb = journal->j_superblock;
+ 
+-	/* If enabling v2 checksums, update superblock */
+-	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
++	/* If enabling v3 checksums, update superblock */
++	if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
+ 		sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
+ 		sb->s_feature_compat &=
+ 			~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+@@ -1844,8 +1858,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 		}
+ 
+ 		/* Precompute checksum seed for all metadata */
+-		if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+-					      JBD2_FEATURE_INCOMPAT_CSUM_V2))
++		if (jbd2_journal_has_csum_v2or3(journal))
+ 			journal->j_csum_seed = jbd2_chksum(journal, ~0,
+ 							   sb->s_uuid,
+ 							   sizeof(sb->s_uuid));
+@@ -1854,7 +1867,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+ 	/* If enabling v1 checksums, downgrade superblock */
+ 	if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
+ 		sb->s_feature_incompat &=
+-			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
++			~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 |
++				     JBD2_FEATURE_INCOMPAT_CSUM_V3);
+ 
+ 	sb->s_feature_compat    |= cpu_to_be32(compat);
+ 	sb->s_feature_ro_compat |= cpu_to_be32(ro);
+@@ -2167,16 +2181,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
+  */
+ size_t journal_tag_bytes(journal_t *journal)
+ {
+-	journal_block_tag_t tag;
+-	size_t x = 0;
++	size_t sz;
++
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		return sizeof(journal_block_tag3_t);
++
++	sz = sizeof(journal_block_tag_t);
+ 
+ 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
+-		x += sizeof(tag.t_checksum);
++		sz += sizeof(__u16);
+ 
+ 	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+-		return x + JBD2_TAG_SIZE64;
++		return sz;
+ 	else
+-		return x + JBD2_TAG_SIZE32;
++		return sz - sizeof(__u32);
+ }
+ 
+ /*
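
Assuming the struct layouts from the include/linux/jbd2.h hunk later in this patch (journal_block_tag_t is 4+2+2+4 = 12 bytes, journal_block_tag3_t is 16), the rewritten journal_tag_bytes() yields the sizes below; a worked userspace rendering of the same arithmetic:

#include <stdio.h>

static unsigned tag_bytes(int csum_v3, int csum_v2, int sixty_four_bit)
{
	unsigned sz;

	if (csum_v3)
		return 16;		/* sizeof(journal_block_tag3_t) */

	sz = 12;			/* sizeof(journal_block_tag_t) */
	if (csum_v2)
		sz += 2;		/* sizeof(__u16) */

	return sixty_four_bit ? sz : sz - 4;	/* drop t_blocknr_high */
}

int main(void)
{
	/*                        v3 v2 64bit */
	printf("%u\n", tag_bytes(0, 0, 0));	/*  8, old JBD2_TAG_SIZE32 */
	printf("%u\n", tag_bytes(0, 1, 0));	/* 10 */
	printf("%u\n", tag_bytes(0, 0, 1));	/* 12, old JBD2_TAG_SIZE64 */
	printf("%u\n", tag_bytes(0, 1, 1));	/* 14 */
	printf("%u\n", tag_bytes(1, 0, 1));	/* 16 */
	return 0;
}
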
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 3929c50428b1..20dbfabbf874 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
+ 	__be32 provided;
+ 	__u32 calculated;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
+@@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
+ 	int			nr = 0, size = journal->j_blocksize;
+ 	int			tag_bytes = journal_tag_bytes(journal);
+ 
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		size -= sizeof(struct jbd2_journal_block_tail);
+ 
+ 	tagp = &bh->b_data[sizeof(journal_header_t)];
+@@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal)
+ 	return err;
+ }
+ 
+-static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
++static inline unsigned long long read_tag_block(journal_t *journal,
++						journal_block_tag_t *tag)
+ {
+ 	unsigned long long block = be32_to_cpu(tag->t_blocknr);
+-	if (tag_bytes > JBD2_TAG_SIZE32)
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+ 		block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
+ 	return block;
+ }
+@@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ 	__be32 provided;
+ 	__u32 calculated;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	h = buf;
+@@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
+ static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
+ 				      void *buf, __u32 sequence)
+ {
++	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
+ 	__u32 csum32;
+ 	__be32 seq;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	seq = cpu_to_be32(sequence);
+ 	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
+ 	csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
+ 
+-	return tag->t_checksum == cpu_to_be16(csum32);
++	if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		return tag3->t_checksum == cpu_to_be32(csum32);
++	else
++		return tag->t_checksum == cpu_to_be16(csum32);
+ }
+ 
+ static int do_one_pass(journal_t *journal,
+@@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal,
+ 	int			tag_bytes = journal_tag_bytes(journal);
+ 	__u32			crc32_sum = ~0; /* Transactional Checksums */
+ 	int			descr_csum_size = 0;
++	int			block_error = 0;
+ 
+ 	/*
+ 	 * First thing is to establish what we expect to find in the log
+@@ -512,8 +518,7 @@ static int do_one_pass(journal_t *journal,
+ 		switch(blocktype) {
+ 		case JBD2_DESCRIPTOR_BLOCK:
+ 			/* Verify checksum first */
+-			if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+-					JBD2_FEATURE_INCOMPAT_CSUM_V2))
++			if (jbd2_journal_has_csum_v2or3(journal))
+ 				descr_csum_size =
+ 					sizeof(struct jbd2_journal_block_tail);
+ 			if (descr_csum_size > 0 &&
+@@ -574,7 +579,7 @@ static int do_one_pass(journal_t *journal,
+ 					unsigned long long blocknr;
+ 
+ 					J_ASSERT(obh != NULL);
+-					blocknr = read_tag_block(tag_bytes,
++					blocknr = read_tag_block(journal,
+ 								 tag);
+ 
+ 					/* If the block has been
+@@ -598,7 +603,8 @@ static int do_one_pass(journal_t *journal,
+ 						       "checksum recovering "
+ 						       "block %llu in log\n",
+ 						       blocknr);
+-						continue;
++						block_error = 1;
++						goto skip_write;
+ 					}
+ 
+ 					/* Find a buffer for the new
+@@ -797,7 +803,8 @@ static int do_one_pass(journal_t *journal,
+ 				success = -EIO;
+ 		}
+ 	}
+-
++	if (block_error && success == 0)
++		success = -EIO;
+ 	return success;
+ 
+  failed:
+@@ -811,7 +818,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
+ 	__be32 provided;
+ 	__u32 calculated;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return 1;
+ 
+ 	tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
+diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
+index 198c9c10276d..d5e95a175c92 100644
+--- a/fs/jbd2/revoke.c
++++ b/fs/jbd2/revoke.c
+@@ -91,8 +91,8 @@
+ #include <linux/list.h>
+ #include <linux/init.h>
+ #include <linux/bio.h>
+-#endif
+ #include <linux/log2.h>
++#endif
+ 
+ static struct kmem_cache *jbd2_revoke_record_cache;
+ static struct kmem_cache *jbd2_revoke_table_cache;
+@@ -597,7 +597,7 @@ static void write_one_revoke_record(journal_t *journal,
+ 	offset = *offsetp;
+ 
+ 	/* Do we need to leave space at the end for a checksum? */
+-	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (jbd2_journal_has_csum_v2or3(journal))
+ 		csum_size = sizeof(struct jbd2_journal_revoke_tail);
+ 
+ 	/* Make sure we have a descriptor with space left for the record */
+@@ -644,7 +644,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh)
+ 	struct jbd2_journal_revoke_tail *tail;
+ 	__u32 csum;
+ 
+-	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
++	if (!jbd2_journal_has_csum_v2or3(j))
+ 		return;
+ 
+ 	tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize -
+diff --git a/fs/namei.c b/fs/namei.c
+index e3249d565c95..227c78ae70b4 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -34,6 +34,7 @@
+ #include <linux/device_cgroup.h>
+ #include <linux/fs_struct.h>
+ #include <linux/posix_acl.h>
++#include <linux/hash.h>
+ #include <asm/uaccess.h>
+ 
+ #include "internal.h"
+@@ -1661,8 +1662,7 @@ static inline int can_lookup(struct inode *inode)
+ 
+ static inline unsigned int fold_hash(unsigned long hash)
+ {
+-	hash += hash >> (8*sizeof(int));
+-	return hash;
++	return hash_64(hash, 32);
+ }
+ 
+ #else	/* 32-bit case */
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 7c67de88f3f1..4ea2b7378d8c 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2391,6 +2391,14 @@ long do_mount(const char *dev_name, const char *dir_name,
+ 	if (flags & MS_RDONLY)
+ 		mnt_flags |= MNT_READONLY;
+ 
++	/* The default atime for remount is preservation */
++	if ((flags & MS_REMOUNT) &&
++	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
++		       MS_STRICTATIME)) == 0)) {
++		mnt_flags &= ~MNT_ATIME_MASK;
++		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
++	}
++
+ 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
+ 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ 		   MS_STRICTATIME);
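
The namespace.c hunk makes remount preserve the mount's existing atime policy when the caller specifies none, a read-modify-write of the flag word. A standalone sketch with made-up flag values standing in for the MNT_* atime bits:

#include <stdio.h>

#define ATIME_MASK	0x7	/* noatime | nodiratime | relatime (made up) */

static unsigned remount_flags(unsigned requested, unsigned existing)
{
	if ((requested & ATIME_MASK) == 0) {
		/* caller said nothing about atime: keep current policy */
		requested &= ~ATIME_MASK;
		requested |= existing & ATIME_MASK;
	}
	return requested;
}

int main(void)
{
	/* existing mount is relatime (0x4); remount asks only for ro (0x8) */
	printf("%#x\n", remount_flags(0x8, 0x4));	/* 0xc: ro + relatime */
	return 0;
}
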
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 40062e42c955..067d8c90eb1a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2532,6 +2532,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 	struct nfs4_closedata *calldata = data;
+ 	struct nfs4_state *state = calldata->state;
+ 	struct inode *inode = calldata->inode;
++	bool is_rdonly, is_wronly, is_rdwr;
+ 	int call_close = 0;
+ 
+ 	dprintk("%s: begin!\n", __func__);
+@@ -2539,18 +2540,24 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 		goto out_wait;
+ 
+ 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+-	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
+ 	spin_lock(&state->owner->so_lock);
++	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
++	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
++	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
++	/* Calculate the current open share mode */
++	calldata->arg.fmode = 0;
++	if (is_rdonly || is_rdwr)
++		calldata->arg.fmode |= FMODE_READ;
++	if (is_wronly || is_rdwr)
++		calldata->arg.fmode |= FMODE_WRITE;
+ 	/* Calculate the change in open mode */
+ 	if (state->n_rdwr == 0) {
+ 		if (state->n_rdonly == 0) {
+-			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+-			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
++			call_close |= is_rdonly || is_rdwr;
+ 			calldata->arg.fmode &= ~FMODE_READ;
+ 		}
+ 		if (state->n_wronly == 0) {
+-			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+-			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
++			call_close |= is_wronly || is_rdwr;
+ 			calldata->arg.fmode &= ~FMODE_WRITE;
+ 		}
+ 	}
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 3eaa6e30a2dc..cc8c5b32043c 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -672,7 +672,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 		clp->cl_cb_session = ses;
+ 		args.bc_xprt = conn->cb_xprt;
+ 		args.prognumber = clp->cl_cb_session->se_cb_prog;
+-		args.protocol = XPRT_TRANSPORT_BC_TCP;
++		args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
++				XPRT_TRANSPORT_BC;
+ 		args.authflavor = ses->se_cb_sec.flavor;
+ 	}
+ 	/* Create RPC client */
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 760c85a6f534..4942f4370f60 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs)
+ 	 */
+ 	ret = nfsd_racache_init(2*nrservs);
+ 	if (ret)
+-		return ret;
++		goto dec_users;
++
+ 	ret = nfs4_state_start();
+ 	if (ret)
+ 		goto out_racache;
+@@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs)
+ 
+ out_racache:
+ 	nfsd_racache_shutdown();
++dec_users:
++	nfsd_users--;
+ 	return ret;
+ }
+ 
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index cbd0f1b324b9..09f0d9c374a3 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -304,15 +304,11 @@ static void render_cap_t(struct seq_file *m, const char *header,
+ 	seq_puts(m, header);
+ 	CAP_FOR_EACH_U32(__capi) {
+ 		seq_printf(m, "%08x",
+-			   a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
++			   a->cap[CAP_LAST_U32 - __capi]);
+ 	}
+ 	seq_putc(m, '\n');
+ }
+ 
+-/* Remove non-existent capabilities */
+-#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \
+-				CAP_TO_MASK(CAP_LAST_CAP + 1) - 1)
+-
+ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+ {
+ 	const struct cred *cred;
+@@ -326,11 +322,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+ 	cap_bset	= cred->cap_bset;
+ 	rcu_read_unlock();
+ 
+-	NORM_CAPS(cap_inheritable);
+-	NORM_CAPS(cap_permitted);
+-	NORM_CAPS(cap_effective);
+-	NORM_CAPS(cap_bset);
+-
+ 	render_cap_t(m, "CapInh:\t", &cap_inheritable);
+ 	render_cap_t(m, "CapPrm:\t", &cap_permitted);
+ 	render_cap_t(m, "CapEff:\t", &cap_effective);
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index be9a1fa2721b..0415a628b2ab 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -1657,11 +1657,72 @@ xfs_vm_readpages(
+ 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
+ }
+ 
++/*
++ * This is basically a copy of __set_page_dirty_buffers() with one
++ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
++ * dirty, we'll never be able to clean them because we don't write buffers
++ * beyond EOF, and that means we can't invalidate pages that span EOF
++ * that have been marked dirty. Further, the dirty state can leak into
++ * the file interior if the file is extended, resulting in all sorts of
++ * bad things happening as the state does not match the underlying data.
++ *
++ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
++ * this only exist because of bufferheads and how the generic code manages them.
++ */
++STATIC int
++xfs_vm_set_page_dirty(
++	struct page		*page)
++{
++	struct address_space	*mapping = page->mapping;
++	struct inode		*inode = mapping->host;
++	loff_t			end_offset;
++	loff_t			offset;
++	int			newly_dirty;
++
++	if (unlikely(!mapping))
++		return !TestSetPageDirty(page);
++
++	end_offset = i_size_read(inode);
++	offset = page_offset(page);
++
++	spin_lock(&mapping->private_lock);
++	if (page_has_buffers(page)) {
++		struct buffer_head *head = page_buffers(page);
++		struct buffer_head *bh = head;
++
++		do {
++			if (offset < end_offset)
++				set_buffer_dirty(bh);
++			bh = bh->b_this_page;
++			offset += 1 << inode->i_blkbits;
++		} while (bh != head);
++	}
++	newly_dirty = !TestSetPageDirty(page);
++	spin_unlock(&mapping->private_lock);
++
++	if (newly_dirty) {
++		/* sigh - __set_page_dirty() is static, so copy it here, too */
++		unsigned long flags;
++
++		spin_lock_irqsave(&mapping->tree_lock, flags);
++		if (page->mapping) {	/* Race with truncate? */
++			WARN_ON_ONCE(!PageUptodate(page));
++			account_page_dirtied(page, mapping);
++			radix_tree_tag_set(&mapping->page_tree,
++					page_index(page), PAGECACHE_TAG_DIRTY);
++		}
++		spin_unlock_irqrestore(&mapping->tree_lock, flags);
++		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
++	}
++	return newly_dirty;
++}
++
+ const struct address_space_operations xfs_address_space_operations = {
+ 	.readpage		= xfs_vm_readpage,
+ 	.readpages		= xfs_vm_readpages,
+ 	.writepage		= xfs_vm_writepage,
+ 	.writepages		= xfs_vm_writepages,
++	.set_page_dirty		= xfs_vm_set_page_dirty,
+ 	.releasepage		= xfs_vm_releasepage,
+ 	.invalidatepage		= xfs_vm_invalidatepage,
+ 	.write_begin		= xfs_vm_write_begin,
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 1ee776d477c3..895db7a88412 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -1121,7 +1121,8 @@ xfs_qm_dqflush(
+ 	 * Get the buffer containing the on-disk dquot
+ 	 */
+ 	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+-				   mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL);
++				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
++				   &xfs_dquot_buf_ops);
+ 	if (error)
+ 		goto out_unlock;
+ 
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 4c749ab543d0..d56b136e68fe 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -299,7 +299,16 @@ xfs_file_aio_read(
+ 				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+ 				return ret;
+ 			}
+-			truncate_pagecache_range(VFS_I(ip), pos, -1);
++
++			/*
++			 * Invalidate whole pages. This can return an error if
++			 * we fail to invalidate a page, but this should never
++			 * happen on XFS. Warn if it does fail.
++			 */
++			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
++						pos >> PAGE_CACHE_SHIFT, -1);
++			WARN_ON_ONCE(ret);
++			ret = 0;
+ 		}
+ 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+ 	}
+@@ -678,7 +687,15 @@ xfs_file_dio_aio_write(
+ 						    pos, -1);
+ 		if (ret)
+ 			goto out;
+-		truncate_pagecache_range(VFS_I(ip), pos, -1);
++		/*
++		 * Invalidate whole pages. This can return an error if
++		 * we fail to invalidate a page, but this should never
++		 * happen on XFS. Warn if it does fail.
++		 */
++		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
++						pos >> PAGE_CACHE_SHIFT, -1);
++		WARN_ON_ONCE(ret);
++		ret = 0;
+ 	}
+ 
+ 	/*
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 39797490a1f1..5b166a07d55e 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -2121,6 +2121,17 @@ xlog_recover_validate_buf_type(
+ 	__uint16_t		magic16;
+ 	__uint16_t		magicda;
+ 
++	/*
++	 * We can only do post-recovery validation on items on CRC-enabled
++	 * filesystems as we need to know when the buffer was written to be able
++	 * to determine if we should have replayed the item. If we replay old
++	 * metadata over a newer buffer, then it will enter a temporarily
++	 * inconsistent state resulting in verification failures. Hence for now
++	 * just avoid the verification stage for non-CRC filesystems.
++	 */
++	if (!xfs_sb_version_hascrc(&mp->m_sb))
++		return;
++
+ 	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
+ 	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
+ 	magicda = be16_to_cpu(info->magic);
+@@ -2156,8 +2167,6 @@ xlog_recover_validate_buf_type(
+ 		bp->b_ops = &xfs_agf_buf_ops;
+ 		break;
+ 	case XFS_BLFT_AGFL_BUF:
+-		if (!xfs_sb_version_hascrc(&mp->m_sb))
+-			break;
+ 		if (magic32 != XFS_AGFL_MAGIC) {
+ 			xfs_warn(mp, "Bad AGFL block magic!");
+ 			ASSERT(0);
+@@ -2190,10 +2199,6 @@ xlog_recover_validate_buf_type(
+ #endif
+ 		break;
+ 	case XFS_BLFT_DINO_BUF:
+-		/*
+-		 * we get here with inode allocation buffers, not buffers that
+-		 * track unlinked list changes.
+-		 */
+ 		if (magic16 != XFS_DINODE_MAGIC) {
+ 			xfs_warn(mp, "Bad INODE block magic!");
+ 			ASSERT(0);
+@@ -2273,8 +2278,6 @@ xlog_recover_validate_buf_type(
+ 		bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ 		break;
+ 	case XFS_BLFT_ATTR_RMT_BUF:
+-		if (!xfs_sb_version_hascrc(&mp->m_sb))
+-			break;
+ 		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
+ 			xfs_warn(mp, "Bad attr remote magic!");
+ 			ASSERT(0);
+@@ -2381,16 +2384,7 @@ xlog_recover_do_reg_buffer(
+ 	/* Shouldn't be any more regions */
+ 	ASSERT(i == item->ri_total);
+ 
+-	/*
+-	 * We can only do post recovery validation on items on CRC enabled
+-	 * fielsystems as we need to know when the buffer was written to be able
+-	 * to determine if we should have replayed the item. If we replay old
+-	 * metadata over a newer buffer, then it will enter a temporarily
+-	 * inconsistent state resulting in verification failures. Hence for now
+-	 * just avoid the verification stage for non-crc filesystems
+-	 */
+-	if (xfs_sb_version_hascrc(&mp->m_sb))
+-		xlog_recover_validate_buf_type(mp, bp, buf_f);
++	xlog_recover_validate_buf_type(mp, bp, buf_f);
+ }
+ 
+ /*
+@@ -2625,12 +2619,29 @@ xlog_recover_buffer_pass2(
+ 	}
+ 
+ 	/*
+-	 * recover the buffer only if we get an LSN from it and it's less than
++	 * Recover the buffer only if we get an LSN from it and it's less than
+ 	 * the lsn of the transaction we are replaying.
++	 *
++	 * Note that we have to be extremely careful of readahead here.
++	 * Readahead does not attach verifiers to the buffers, so if we don't
++	 * actually do any replay after readahead because the LSN we found
++	 * in the buffer is more recent than the current transaction, then we
++	 * need to attach the verifier directly. Failure to do so can lead to
++	 * future recovery actions (e.g. EFI and unlinked list recovery)
++	 * operating on the buffers without the verifier attached. This
++	 * can leave blocks on disk with the correct content but a stale
++	 * CRC.
++	 *
++	 * It is safe to assume these clean buffers are currently up to date.
++	 * If the buffer is dirtied by a later transaction being replayed, then
++	 * the verifier will be reset to match whatever recovery turns that
++	 * buffer into.
+ 	 */
+ 	lsn = xlog_recover_get_buf_lsn(mp, bp);
+-	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
++	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
++		xlog_recover_validate_buf_type(mp, bp, buf_f);
+ 		goto out_release;
++	}
+ 
+ 	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+ 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 4688a622b373..794aa2fb9c69 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -1193,6 +1193,12 @@ xfs_qm_dqiter_bufs(
+ 		if (error)
+ 			break;
+ 
++		/*
++		 * A corrupt buffer might not have a verifier attached, so
++		 * make sure we have the correct one attached before writeback
++		 * occurs.
++		 */
++		bp->b_ops = &xfs_dquot_buf_ops;
+ 		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
+ 		xfs_buf_delwri_queue(bp, buffer_list);
+ 		xfs_buf_relse(bp);
+@@ -1276,7 +1282,7 @@ xfs_qm_dqiterate(
+ 					xfs_buf_readahead(mp->m_ddev_targp,
+ 					       XFS_FSB_TO_DADDR(mp, rablkno),
+ 					       mp->m_quotainfo->qi_dqchunklen,
+-					       NULL);
++					       &xfs_dquot_buf_ops);
+ 					rablkno++;
+ 				}
+ 			}
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index 84b13ad67c1c..aa93e5ef594c 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set;
+ # error Fix up hand-coded capability macro initializers
+ #else /* HAND-CODED capability initializers */
+ 
++#define CAP_LAST_U32			((_KERNEL_CAPABILITY_U32S) - 1)
++#define CAP_LAST_U32_VALID_MASK		(CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
++
+ # define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
+-# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
++# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
+ # define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+ 				    | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
+ 				    CAP_FS_MASK_B1 } })
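
The new CAP_LAST_U32_VALID_MASK exists because the last u32 word of a kernel_cap_t is only partially populated; the old CAP_FULL_SET put ~0 there and so claimed capability bits that don't exist. A standalone sketch of the mask computation, assuming CAP_LAST_CAP == 36 as in v3.12 (the exact value is version-dependent):

#include <stdint.h>
#include <stdio.h>

#define CAP_LAST_CAP	36
#define CAP_TO_MASK(x)	(1u << ((x) & 31))

int main(void)
{
	/* Low word: all 32 caps 0..31 exist, so ~0 is fine there.
	 * High word: only caps 32..36 exist -> 5 valid bits. */
	uint32_t valid_mask = CAP_TO_MASK(CAP_LAST_CAP + 1) - 1;

	printf("high-word mask:             %#x\n", valid_mask);	/* 0x1f */
	printf("old CAP_FULL_SET high word: %#x\n", ~0u);
	printf("masked:                     %#x\n", ~0u & valid_mask);
	return 0;
}
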
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index d5b50a19463c..0dae71e9971c 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -159,7 +159,11 @@ typedef struct journal_header_s
+  * journal_block_tag (in the descriptor).  The other h_chksum* fields are
+  * not used.
+  *
+- * Checksum v1 and v2 are mutually exclusive features.
++ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
++ * journal_block_tag3_t to store a full 32-bit checksum.  Everything else
++ * is the same as v2.
++ *
++ * Checksum v1, v2, and v3 are mutually exclusive features.
+  */
+ struct commit_header {
+ 	__be32		h_magic;
+@@ -179,6 +183,14 @@ struct commit_header {
+  * raw struct shouldn't be used for pointer math or sizeof() - use
+  * journal_tag_bytes(journal) instead to compute this.
+  */
++typedef struct journal_block_tag3_s
++{
++	__be32		t_blocknr;	/* The on-disk block number */
++	__be32		t_flags;	/* See below */
++	__be32		t_blocknr_high; /* most-significant high 32bits. */
++	__be32		t_checksum;	/* crc32c(uuid+seq+block) */
++} journal_block_tag3_t;
++
+ typedef struct journal_block_tag_s
+ {
+ 	__be32		t_blocknr;	/* The on-disk block number */
+@@ -187,9 +199,6 @@ typedef struct journal_block_tag_s
+ 	__be32		t_blocknr_high; /* most-significant high 32bits. */
+ } journal_block_tag_t;
+ 
+-#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
+-#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
+-
+ /* Tail of descriptor block, for checksumming */
+ struct jbd2_journal_block_tail {
+ 	__be32		t_checksum;	/* crc32c(uuid+descr_block) */
+@@ -284,6 +293,7 @@ typedef struct journal_superblock_s
+ #define JBD2_FEATURE_INCOMPAT_64BIT		0x00000002
+ #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT	0x00000004
+ #define JBD2_FEATURE_INCOMPAT_CSUM_V2		0x00000008
++#define JBD2_FEATURE_INCOMPAT_CSUM_V3		0x00000010
+ 
+ /* Features known to this kernel version: */
+ #define JBD2_KNOWN_COMPAT_FEATURES	JBD2_FEATURE_COMPAT_CHECKSUM
+@@ -291,7 +301,8 @@ typedef struct journal_superblock_s
+ #define JBD2_KNOWN_INCOMPAT_FEATURES	(JBD2_FEATURE_INCOMPAT_REVOKE | \
+ 					JBD2_FEATURE_INCOMPAT_64BIT | \
+ 					JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+-					JBD2_FEATURE_INCOMPAT_CSUM_V2)
++					JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
++					JBD2_FEATURE_INCOMPAT_CSUM_V3)
+ 
+ #ifdef __KERNEL__
+ 
+@@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y)
+ extern int jbd2_journal_blocks_per_page(struct inode *inode);
+ extern size_t journal_tag_bytes(journal_t *journal);
+ 
++static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
++{
++	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
++	    JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
++		return 1;
++
++	return 0;
++}
++
+ /*
+  * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+  * transaction control blocks.
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index b05963f09ebf..f5bfb1a80abe 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -32,6 +32,7 @@ struct svc_xprt_class {
+ 	struct svc_xprt_ops	*xcl_ops;
+ 	struct list_head	xcl_list;
+ 	u32			xcl_max_payload;
++	int			xcl_ident;
+ };
+ 
+ /*
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index b4f1effc9216..409fafb63f63 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -149,6 +149,7 @@ struct scsi_device {
+ 	unsigned skip_ms_page_8:1;	/* do not use MODE SENSE page 0x08 */
+ 	unsigned skip_ms_page_3f:1;	/* do not use MODE SENSE page 0x3f */
+ 	unsigned skip_vpd_pages:1;	/* do not read VPD pages */
++	unsigned try_vpd_pages:1;	/* attempt to read VPD pages */
+ 	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
+ 	unsigned no_start_on_add:1;	/* do not issue start on add */
+ 	unsigned allow_restart:1; /* issue START_UNIT in error handler */
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 447d2d7466fc..183eaab7c380 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -32,4 +32,9 @@
+ #define BLIST_ATTACH_PQ3	0x1000000 /* Scan: Attach to PQ3 devices */
+ #define BLIST_NO_DIF		0x2000000 /* Disable T10 PI (DIF) */
+ #define BLIST_SKIP_VPD_PAGES	0x4000000 /* Ignore SBC-3 VPD pages */
++#define BLIST_SCSI3LUN		0x8000000 /* Scan more than 256 LUNs
++					     for sequential scan */
++#define BLIST_TRY_VPD_PAGES	0x10000000 /* Attempt to read VPD pages */
++#define BLIST_NO_RSOC		0x20000000 /* don't try to issue RSOC */
++
+ #endif
+diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
+index 99b80abf360a..3066718eb120 100644
+--- a/include/uapi/rdma/rdma_user_cm.h
++++ b/include/uapi/rdma/rdma_user_cm.h
+@@ -34,6 +34,7 @@
+ #define RDMA_USER_CM_H
+ 
+ #include <linux/types.h>
++#include <linux/socket.h>
+ #include <linux/in6.h>
+ #include <rdma/ib_user_verbs.h>
+ #include <rdma/ib_user_sa.h>
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 197a496587a6..4059e949beb2 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1412,7 +1412,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+ 	audit_log_format(ab, " %s=", prefix);
+ 	CAP_FOR_EACH_U32(i) {
+ 		audit_log_format(ab, "%08x",
+-				 cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
++				 cap->cap[CAP_LAST_U32 - i]);
+ 	}
+ }
+ 
+diff --git a/kernel/capability.c b/kernel/capability.c
+index 788653b97430..50fb74b136db 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
+ 		i++;
+ 	}
+ 
++	effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++	permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++	inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++
+ 	new = prepare_creds();
+ 	if (!new)
+ 		return -ENOMEM;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f94695c9d38b..e4b9b60e25b1 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2465,6 +2465,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	 * shared futexes. We need to compare the keys:
+ 	 */
+ 	if (match_futex(&q.key, &key2)) {
++		queue_unlock(&q, hb);
+ 		ret = -EINVAL;
+ 		goto out_put_keys;
+ 	}
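
The futex fix above is the classic leaked-lock error path: the -EINVAL return sits after the hash-bucket lock was taken, so it needs a matching unlock before bailing out. A contrived pthread analogue (not kernel code; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bucket = PTHREAD_MUTEX_INITIALIZER;

static int requeue(int key1, int key2)
{
	pthread_mutex_lock(&bucket);

	if (key1 == key2) {			/* invalid request */
		pthread_mutex_unlock(&bucket);	/* the missing unlock */
		return -1;
	}

	/* ... do the actual requeue under the lock ... */
	pthread_mutex_unlock(&bucket);
	return 0;
}

int main(void)
{
	printf("%d\n", requeue(1, 1));	/* without the fix: deadlocks */
	printf("%d\n", requeue(1, 2));	/* ... the second call here */
	return 0;
}
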
+diff --git a/kernel/smp.c b/kernel/smp.c
+index 0564571dcdf7..7d1187c0c2b6 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -650,7 +650,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ 			if (cond_func(cpu, info)) {
+ 				ret = smp_call_function_single(cpu, func,
+ 								info, wait);
+-				WARN_ON_ONCE(!ret);
++				WARN_ON_ONCE(ret);
+ 			}
+ 		preempt_enable();
+ 	}
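
smp_call_function_single() follows the kernel's 0-on-success convention, so the previous WARN_ON_ONCE(!ret) fired on every successful call and stayed silent on real failures. A toy rendering of the inverted assertion (not kernel code):

#include <stdio.h>

#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "warning at line %d\n", __LINE__); } while (0)

int main(void)
{
	int ret = 0;		/* success, kernel-style */

	WARN_ON(!ret);		/* old check: warns on success (wrong) */
	WARN_ON(ret);		/* new check: silent on success (right) */
	return 0;
}
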
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index a758ec217bc0..65da8249bae6 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1981,7 +1981,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
+ 
+ /**
+  * rb_update_event - update event type and data
+- * @event: the even to update
++ * @event: the event to update
+  * @type: the type of event
+  * @length: the size of the event field in the ring buffer
+  *
+@@ -3354,21 +3354,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
+ 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ 
+ 	/* Iterator usage is expected to have record disabled */
+-	if (list_empty(&cpu_buffer->reader_page->list)) {
+-		iter->head_page = rb_set_head_page(cpu_buffer);
+-		if (unlikely(!iter->head_page))
+-			return;
+-		iter->head = iter->head_page->read;
+-	} else {
+-		iter->head_page = cpu_buffer->reader_page;
+-		iter->head = cpu_buffer->reader_page->read;
+-	}
++	iter->head_page = cpu_buffer->reader_page;
++	iter->head = cpu_buffer->reader_page->read;
++
++	iter->cache_reader_page = iter->head_page;
++	iter->cache_read = iter->head;
++
+ 	if (iter->head)
+ 		iter->read_stamp = cpu_buffer->read_stamp;
+ 	else
+ 		iter->read_stamp = iter->head_page->page->time_stamp;
+-	iter->cache_reader_page = cpu_buffer->reader_page;
+-	iter->cache_read = cpu_buffer->read;
+ }
+ 
+ /**
+@@ -3761,12 +3756,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+ 		return NULL;
+ 
+ 	/*
+-	 * We repeat when a time extend is encountered.
+-	 * Since the time extend is always attached to a data event,
+-	 * we should never loop more than once.
+-	 * (We never hit the following condition more than twice).
++	 * We repeat when a time extend is encountered or we hit
++	 * the end of the page. Since the time extend is always attached
++	 * to a data event, we should never loop more than three times.
++	 * Once for going to the next page, once on a time extend, and
++	 * finally once to get the event.
++	 * (We never hit the following condition more than thrice).
+ 	 */
+-	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
++	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
+ 		return NULL;
+ 
+ 	if (rb_per_cpu_empty(cpu_buffer))
+diff --git a/mm/util.c b/mm/util.c
+index 96da2d7c076c..de943ec0a4c8 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -272,17 +272,14 @@ pid_t vm_is_stack(struct task_struct *task,
+ 
+ 	if (in_group) {
+ 		struct task_struct *t;
+-		rcu_read_lock();
+-		if (!pid_alive(task))
+-			goto done;
+ 
+-		t = task;
+-		do {
++		rcu_read_lock();
++		for_each_thread(task, t) {
+ 			if (vm_is_stack_for_task(t, vma)) {
+ 				ret = t->pid;
+ 				goto done;
+ 			}
+-		} while_each_thread(task, t);
++		}
+ done:
+ 		rcu_read_unlock();
+ 	}
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a3a81d96314b..2710e850b74c 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -882,7 +882,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
+ 		l2cap_chan_close(chan, 0);
+ 		lock_sock(sk);
+ 
+-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++		    !(current->flags & PF_EXITING))
+ 			err = bt_sock_wait_state(sk, BT_CLOSED,
+ 						 sk->sk_lingertime);
+ 	}
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index ca957d34b0c8..19ba192e9dbf 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -1857,10 +1857,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
+ 	/* Get data directly from socket receive queue without copying it. */
+ 	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
+ 		skb_orphan(skb);
+-		if (!skb_linearize(skb))
++		if (!skb_linearize(skb)) {
+ 			s = rfcomm_recv_frame(s, skb);
+-		else
++			if (!s)
++				break;
++		} else {
+ 			kfree_skb(skb);
++		}
+ 	}
+ 
+ 	if (s && (sk->sk_state == BT_CLOSED))
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index c1c6028e389a..7ca014daa5ab 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -887,7 +887,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
+ 		sk->sk_shutdown = SHUTDOWN_MASK;
+ 		__rfcomm_sock_close(sk);
+ 
+-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++		    !(current->flags & PF_EXITING))
+ 			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+ 	}
+ 	release_sock(sk);
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index d021e441b6e6..4f5f01b779b5 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -913,7 +913,8 @@ static int sco_sock_shutdown(struct socket *sock, int how)
+ 		sco_sock_clear_timer(sk);
+ 		__sco_sock_close(sk);
+ 
+-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
++		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++		    !(current->flags & PF_EXITING))
+ 			err = bt_sock_wait_state(sk, BT_CLOSED,
+ 						 sk->sk_lingertime);
+ 	}
+@@ -933,7 +934,8 @@ static int sco_sock_release(struct socket *sock)
+ 
+ 	sco_sock_close(sk);
+ 
+-	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) {
++	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
++	    !(current->flags & PF_EXITING)) {
+ 		lock_sock(sk);
+ 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+ 		release_sock(sk);
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index 96238ba95f2b..de6662b14e1f 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -13,8 +13,6 @@
+ #include "auth_x.h"
+ #include "auth_x_protocol.h"
+ 
+-#define TEMP_TICKET_BUF_LEN	256
+-
+ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
+ 
+ static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
+@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret,
+ }
+ 
+ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+-			  void **p, void *end, void *obuf, size_t olen)
++			  void **p, void *end, void **obuf, size_t olen)
+ {
+ 	struct ceph_x_encrypt_header head;
+ 	size_t head_len = sizeof(head);
+@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+ 		return -EINVAL;
+ 
+ 	dout("ceph_x_decrypt len %d\n", len);
+-	ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
+-			    *p, len);
++	if (*obuf == NULL) {
++		*obuf = kmalloc(len, GFP_NOFS);
++		if (!*obuf)
++			return -ENOMEM;
++		olen = len;
++	}
++
++	ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
+ 	if (ret)
+ 		return ret;
+ 	if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
+@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac,
+ 	kfree(th);
+ }
+ 
+-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
+-				    struct ceph_crypto_key *secret,
+-				    void *buf, void *end)
++static int process_one_ticket(struct ceph_auth_client *ac,
++			      struct ceph_crypto_key *secret,
++			      void **p, void *end)
+ {
+ 	struct ceph_x_info *xi = ac->private;
+-	int num;
+-	void *p = buf;
++	int type;
++	u8 tkt_struct_v, blob_struct_v;
++	struct ceph_x_ticket_handler *th;
++	void *dbuf = NULL;
++	void *dp, *dend;
++	int dlen;
++	char is_enc;
++	struct timespec validity;
++	struct ceph_crypto_key old_key;
++	void *ticket_buf = NULL;
++	void *tp, *tpend;
++	struct ceph_timespec new_validity;
++	struct ceph_crypto_key new_session_key;
++	struct ceph_buffer *new_ticket_blob;
++	unsigned long new_expires, new_renew_after;
++	u64 new_secret_id;
+ 	int ret;
+-	char *dbuf;
+-	char *ticket_buf;
+-	u8 reply_struct_v;
+ 
+-	dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+-	if (!dbuf)
+-		return -ENOMEM;
++	ceph_decode_need(p, end, sizeof(u32) + 1, bad);
+ 
+-	ret = -ENOMEM;
+-	ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
+-	if (!ticket_buf)
+-		goto out_dbuf;
++	type = ceph_decode_32(p);
++	dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+ 
+-	ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
+-	reply_struct_v = ceph_decode_8(&p);
+-	if (reply_struct_v != 1)
++	tkt_struct_v = ceph_decode_8(p);
++	if (tkt_struct_v != 1)
+ 		goto bad;
+-	num = ceph_decode_32(&p);
+-	dout("%d tickets\n", num);
+-	while (num--) {
+-		int type;
+-		u8 tkt_struct_v, blob_struct_v;
+-		struct ceph_x_ticket_handler *th;
+-		void *dp, *dend;
+-		int dlen;
+-		char is_enc;
+-		struct timespec validity;
+-		struct ceph_crypto_key old_key;
+-		void *tp, *tpend;
+-		struct ceph_timespec new_validity;
+-		struct ceph_crypto_key new_session_key;
+-		struct ceph_buffer *new_ticket_blob;
+-		unsigned long new_expires, new_renew_after;
+-		u64 new_secret_id;
+-
+-		ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
+-
+-		type = ceph_decode_32(&p);
+-		dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
+-
+-		tkt_struct_v = ceph_decode_8(&p);
+-		if (tkt_struct_v != 1)
+-			goto bad;
+-
+-		th = get_ticket_handler(ac, type);
+-		if (IS_ERR(th)) {
+-			ret = PTR_ERR(th);
+-			goto out;
+-		}
+ 
+-		/* blob for me */
+-		dlen = ceph_x_decrypt(secret, &p, end, dbuf,
+-				      TEMP_TICKET_BUF_LEN);
+-		if (dlen <= 0) {
+-			ret = dlen;
+-			goto out;
+-		}
+-		dout(" decrypted %d bytes\n", dlen);
+-		dend = dbuf + dlen;
+-		dp = dbuf;
++	th = get_ticket_handler(ac, type);
++	if (IS_ERR(th)) {
++		ret = PTR_ERR(th);
++		goto out;
++	}
+ 
+-		tkt_struct_v = ceph_decode_8(&dp);
+-		if (tkt_struct_v != 1)
+-			goto bad;
++	/* blob for me */
++	dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
++	if (dlen <= 0) {
++		ret = dlen;
++		goto out;
++	}
++	dout(" decrypted %d bytes\n", dlen);
++	dp = dbuf;
++	dend = dp + dlen;
+ 
+-		memcpy(&old_key, &th->session_key, sizeof(old_key));
+-		ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+-		if (ret)
+-			goto out;
++	tkt_struct_v = ceph_decode_8(&dp);
++	if (tkt_struct_v != 1)
++		goto bad;
+ 
+-		ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+-		ceph_decode_timespec(&validity, &new_validity);
+-		new_expires = get_seconds() + validity.tv_sec;
+-		new_renew_after = new_expires - (validity.tv_sec / 4);
+-		dout(" expires=%lu renew_after=%lu\n", new_expires,
+-		     new_renew_after);
++	memcpy(&old_key, &th->session_key, sizeof(old_key));
++	ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
++	if (ret)
++		goto out;
+ 
+-		/* ticket blob for service */
+-		ceph_decode_8_safe(&p, end, is_enc, bad);
+-		tp = ticket_buf;
+-		if (is_enc) {
+-			/* encrypted */
+-			dout(" encrypted ticket\n");
+-			dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
+-					      TEMP_TICKET_BUF_LEN);
+-			if (dlen < 0) {
+-				ret = dlen;
+-				goto out;
+-			}
+-			dlen = ceph_decode_32(&tp);
+-		} else {
+-			/* unencrypted */
+-			ceph_decode_32_safe(&p, end, dlen, bad);
+-			ceph_decode_need(&p, end, dlen, bad);
+-			ceph_decode_copy(&p, ticket_buf, dlen);
++	ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
++	ceph_decode_timespec(&validity, &new_validity);
++	new_expires = get_seconds() + validity.tv_sec;
++	new_renew_after = new_expires - (validity.tv_sec / 4);
++	dout(" expires=%lu renew_after=%lu\n", new_expires,
++	     new_renew_after);
++
++	/* ticket blob for service */
++	ceph_decode_8_safe(p, end, is_enc, bad);
++	if (is_enc) {
++		/* encrypted */
++		dout(" encrypted ticket\n");
++		dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
++		if (dlen < 0) {
++			ret = dlen;
++			goto out;
+ 		}
+-		tpend = tp + dlen;
+-		dout(" ticket blob is %d bytes\n", dlen);
+-		ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
+-		blob_struct_v = ceph_decode_8(&tp);
+-		new_secret_id = ceph_decode_64(&tp);
+-		ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
+-		if (ret)
++		tp = ticket_buf;
++		dlen = ceph_decode_32(&tp);
++	} else {
++		/* unencrypted */
++		ceph_decode_32_safe(p, end, dlen, bad);
++		ticket_buf = kmalloc(dlen, GFP_NOFS);
++		if (!ticket_buf) {
++			ret = -ENOMEM;
+ 			goto out;
+-
+-		/* all is well, update our ticket */
+-		ceph_crypto_key_destroy(&th->session_key);
+-		if (th->ticket_blob)
+-			ceph_buffer_put(th->ticket_blob);
+-		th->session_key = new_session_key;
+-		th->ticket_blob = new_ticket_blob;
+-		th->validity = new_validity;
+-		th->secret_id = new_secret_id;
+-		th->expires = new_expires;
+-		th->renew_after = new_renew_after;
+-		dout(" got ticket service %d (%s) secret_id %lld len %d\n",
+-		     type, ceph_entity_type_name(type), th->secret_id,
+-		     (int)th->ticket_blob->vec.iov_len);
+-		xi->have_keys |= th->service;
++		}
++		tp = ticket_buf;
++		ceph_decode_need(p, end, dlen, bad);
++		ceph_decode_copy(p, ticket_buf, dlen);
+ 	}
++	tpend = tp + dlen;
++	dout(" ticket blob is %d bytes\n", dlen);
++	ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
++	blob_struct_v = ceph_decode_8(&tp);
++	new_secret_id = ceph_decode_64(&tp);
++	ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
++	if (ret)
++		goto out;
++
++	/* all is well, update our ticket */
++	ceph_crypto_key_destroy(&th->session_key);
++	if (th->ticket_blob)
++		ceph_buffer_put(th->ticket_blob);
++	th->session_key = new_session_key;
++	th->ticket_blob = new_ticket_blob;
++	th->validity = new_validity;
++	th->secret_id = new_secret_id;
++	th->expires = new_expires;
++	th->renew_after = new_renew_after;
++	dout(" got ticket service %d (%s) secret_id %lld len %d\n",
++	     type, ceph_entity_type_name(type), th->secret_id,
++	     (int)th->ticket_blob->vec.iov_len);
++	xi->have_keys |= th->service;
+ 
+-	ret = 0;
+ out:
+ 	kfree(ticket_buf);
+-out_dbuf:
+ 	kfree(dbuf);
+ 	return ret;
+ 
+@@ -270,6 +255,34 @@ bad:
+ 	goto out;
+ }
+ 
++static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
++				    struct ceph_crypto_key *secret,
++				    void *buf, void *end)
++{
++	void *p = buf;
++	u8 reply_struct_v;
++	u32 num;
++	int ret;
++
++	ceph_decode_8_safe(&p, end, reply_struct_v, bad);
++	if (reply_struct_v != 1)
++		return -EINVAL;
++
++	ceph_decode_32_safe(&p, end, num, bad);
++	dout("%d tickets\n", num);
++
++	while (num--) {
++		ret = process_one_ticket(ac, secret, &p, end);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++
++bad:
++	return -EINVAL;
++}
++
+ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+ 				   struct ceph_x_ticket_handler *th,
+ 				   struct ceph_x_authorizer *au)
+@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
+ 	struct ceph_x_ticket_handler *th;
+ 	int ret = 0;
+ 	struct ceph_x_authorize_reply reply;
++	void *preply = &reply;
+ 	void *p = au->reply_buf;
+ 	void *end = p + sizeof(au->reply_buf);
+ 
+ 	th = get_ticket_handler(ac, au->service);
+ 	if (IS_ERR(th))
+ 		return PTR_ERR(th);
+-	ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
++	ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));
+ 	if (ret < 0)
+ 		return ret;
+ 	if (ret != sizeof(reply))
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index ce83d07eb419..94e21b9b1c87 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -904,7 +904,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
+ 	BUG_ON(page_count > (int)USHRT_MAX);
+ 	cursor->page_count = (unsigned short)page_count;
+ 	BUG_ON(length > SIZE_MAX - cursor->page_offset);
+-	cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
++	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
+ }
+ 
+ static struct page *
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index 2ac9ef35110b..dbcbf5a4707f 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
+ 	if (!m) {
+ 		pr_info("alloc_msg unknown type %d\n", type);
+ 		*skip = 1;
++	} else if (front_len > m->front_alloc_len) {
++		pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
++			   front_len, m->front_alloc_len,
++			   (unsigned int)con->peer_name.type,
++			   le64_to_cpu(con->peer_name.num));
++		ceph_msg_put(m);
++		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ 	}
++
+ 	return m;
+ }
+ 
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 6ac0f1c3fc28..8c6e9c75c525 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -683,6 +683,7 @@ static struct svc_xprt_class svc_udp_class = {
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_udp_ops,
+ 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
++	.xcl_ident = XPRT_TRANSPORT_UDP,
+ };
+ 
+ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
+@@ -1277,6 +1278,7 @@ static struct svc_xprt_class svc_tcp_class = {
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_tcp_ops,
+ 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
++	.xcl_ident = XPRT_TRANSPORT_TCP,
+ };
+ 
+ void svc_init_xprt_sock(void)
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 095363eee764..42ce6bfc729d 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1290,7 +1290,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
+ 		}
+ 	}
+ 	spin_unlock(&xprt_list_lock);
+-	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
++	dprintk("RPC: transport (%d) not supported\n", args->ident);
+ 	return ERR_PTR(-EIO);
+ 
+ found:
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 62e4f9bcc387..ed36cb52cd86 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -89,6 +89,7 @@ struct svc_xprt_class svc_rdma_class = {
+ 	.xcl_owner = THIS_MODULE,
+ 	.xcl_ops = &svc_rdma_ops,
+ 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
++	.xcl_ident = XPRT_TRANSPORT_RDMA,
+ };
+ 
+ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
+diff --git a/security/commoncap.c b/security/commoncap.c
+index b9d613e0ef14..963dc5981661 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
+ 		cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
+ 	}
+ 
++	cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++	cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
++
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+index 9cb4a80df98e..bc9983d38ff3 100644
+--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
++++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+@@ -293,19 +293,19 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
+ 	unsigned int sample_size = runtime->sample_bits / 8;
+ 	void *buf = runtime->dma_area;
+ 	struct bf5xx_i2s_pcm_data *dma_data;
+-	unsigned int offset, size;
++	unsigned int offset, samples;
+ 
+ 	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ 
+ 	if (dma_data->tdm_mode) {
+ 		offset = pos * 8 * sample_size;
+-		size = count * 8 * sample_size;
++		samples = count * 8;
+ 	} else {
+ 		offset = frames_to_bytes(runtime, pos);
+-		size = frames_to_bytes(runtime, count);
++		samples = count * runtime->channels;
+ 	}
+ 
+-	snd_pcm_format_set_silence(runtime->format, buf + offset, size);
++	snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
+index adee866f463f..56bfc679f437 100644
+--- a/sound/soc/codecs/adau1701.c
++++ b/sound/soc/codecs/adau1701.c
+@@ -230,8 +230,10 @@ static int adau1701_reg_read(void *context, unsigned int reg,
+ 
+ 	*value = 0;
+ 
+-	for (i = 0; i < size; i++)
+-		*value |= recv_buf[i] << (i * 8);
++	for (i = 0; i < size; i++) {
++		*value <<= 8;
++		*value |= recv_buf[i];
++	}
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 9ad8f019adcd..764d0ea42e7c 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -2250,7 +2250,7 @@ static int max98090_probe(struct snd_soc_codec *codec)
+ 	/* Register for interrupts */
+ 	dev_dbg(codec->dev, "irq = %d\n", max98090->irq);
+ 
+-	ret = request_threaded_irq(max98090->irq, NULL,
++	ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL,
+ 		max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 		"max98090_interrupt", codec);
+ 	if (ret < 0) {
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index c26a8f814b18..aa5253a3548e 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -2061,6 +2061,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = {
+ static const struct regmap_config rt5640_regmap = {
+ 	.reg_bits = 8,
+ 	.val_bits = 16,
++	.use_single_rw = true,
+ 
+ 	.max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) *
+ 					       RT5640_PR_SPACING),
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 86426a117b07..c9ce9772e49b 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3492,6 +3492,7 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++/* Should be called with accdet_lock held */
+ static void wm1811_micd_stop(struct snd_soc_codec *codec)
+ {
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+@@ -3499,14 +3500,10 @@ static void wm1811_micd_stop(struct snd_soc_codec *codec)
+ 	if (!wm8994->jackdet)
+ 		return;
+ 
+-	mutex_lock(&wm8994->accdet_lock);
+-
+ 	snd_soc_update_bits(codec, WM8958_MIC_DETECT_1, WM8958_MICD_ENA, 0);
+ 
+ 	wm1811_jackdet_set_mode(codec, WM1811_JACKDET_MODE_JACK);
+ 
+-	mutex_unlock(&wm8994->accdet_lock);
+-
+ 	if (wm8994->wm8994->pdata.jd_ext_cap)
+ 		snd_soc_dapm_disable_pin(&codec->dapm,
+ 					 "MICBIAS2");
+@@ -3547,10 +3544,10 @@ static void wm8958_open_circuit_work(struct work_struct *work)
+ 						  open_circuit_work.work);
+ 	struct device *dev = wm8994->wm8994->dev;
+ 
+-	wm1811_micd_stop(wm8994->hubs.codec);
+-
+ 	mutex_lock(&wm8994->accdet_lock);
+ 
++	wm1811_micd_stop(wm8994->hubs.codec);
++
+ 	dev_dbg(dev, "Reporting open circuit\n");
+ 
+ 	wm8994->jack_mic = false;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 0d5de6003849..61e871bf63dd 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -1694,3 +1694,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm_adsp2_init);
++
++MODULE_LICENSE("GPL v2");
+diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
+index a3119a00d8fa..6c6b35e471c8 100644
+--- a/sound/soc/pxa/pxa-ssp.c
++++ b/sound/soc/pxa/pxa-ssp.c
+@@ -725,7 +725,8 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
+ 		ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
+ 		if (!ssp_handle) {
+ 			dev_err(dev, "unable to get 'port' phandle\n");
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto err_priv;
+ 		}
+ 
+ 		priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
+@@ -766,9 +767,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai)
+ 			  SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 |	\
+ 			  SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+ 
+-#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+-			    SNDRV_PCM_FMTBIT_S24_LE |	\
+-			    SNDRV_PCM_FMTBIT_S32_LE)
++#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
+ 
+ static const struct snd_soc_dai_ops pxa_ssp_dai_ops = {
+ 	.startup	= pxa_ssp_startup,
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index b302f3b7a587..2ac8d88fe7eb 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -922,11 +922,9 @@ static int i2s_suspend(struct snd_soc_dai *dai)
+ {
+ 	struct i2s_dai *i2s = to_info(dai);
+ 
+-	if (dai->active) {
+-		i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+-		i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+-		i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+-	}
++	i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
++	i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
++	i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+ 
+ 	return 0;
+ }
+@@ -935,11 +933,9 @@ static int i2s_resume(struct snd_soc_dai *dai)
+ {
+ 	struct i2s_dai *i2s = to_info(dai);
+ 
+-	if (dai->active) {
+-		writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+-		writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+-		writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+-	}
++	writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
++	writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
++	writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 330c9a6b5cb5..875cae86d708 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1882,6 +1882,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
+ 			dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK);
+ 		}
+ 
++		dpcm_path_put(&list);
+ capture:
+ 		/* skip if FE doesn't have capture capability */
+ 		if (!fe->cpu_dai->driver->capture.channels_min)
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 9f3eae290900..2d9ab9417289 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -4,6 +4,7 @@ TARGETS += efivarfs
+ TARGETS += kcmp
+ TARGETS += memory-hotplug
+ TARGETS += mqueue
++TARGETS += mount
+ TARGETS += net
+ TARGETS += ptrace
+ TARGETS += timers
+diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile
+new file mode 100644
+index 000000000000..337d853c2b72
+--- /dev/null
++++ b/tools/testing/selftests/mount/Makefile
+@@ -0,0 +1,17 @@
++# Makefile for mount selftests.
++
++all: unprivileged-remount-test
++
++unprivileged-remount-test: unprivileged-remount-test.c
++	gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test
++
++# Allow specific tests to be selected.
++test_unprivileged_remount: unprivileged-remount-test
++	@if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi
++
++run_tests: all test_unprivileged_remount
++
++clean:
++	rm -f unprivileged-remount-test
++
++.PHONY: all test_unprivileged_remount
+diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c
+new file mode 100644
+index 000000000000..1b3ff2fda4d0
+--- /dev/null
++++ b/tools/testing/selftests/mount/unprivileged-remount-test.c
+@@ -0,0 +1,242 @@
++#define _GNU_SOURCE
++#include <sched.h>
++#include <stdio.h>
++#include <errno.h>
++#include <string.h>
++#include <sys/types.h>
++#include <sys/mount.h>
++#include <sys/wait.h>
++#include <stdlib.h>
++#include <unistd.h>
++#include <fcntl.h>
++#include <grp.h>
++#include <stdbool.h>
++#include <stdarg.h>
++
++#ifndef CLONE_NEWNS
++# define CLONE_NEWNS 0x00020000
++#endif
++#ifndef CLONE_NEWUTS
++# define CLONE_NEWUTS 0x04000000
++#endif
++#ifndef CLONE_NEWIPC
++# define CLONE_NEWIPC 0x08000000
++#endif
++#ifndef CLONE_NEWNET
++# define CLONE_NEWNET 0x40000000
++#endif
++#ifndef CLONE_NEWUSER
++# define CLONE_NEWUSER 0x10000000
++#endif
++#ifndef CLONE_NEWPID
++# define CLONE_NEWPID 0x20000000
++#endif
++
++#ifndef MS_RELATIME
++#define MS_RELATIME (1 << 21)
++#endif
++#ifndef MS_STRICTATIME
++#define MS_STRICTATIME (1 << 24)
++#endif
++
++static void die(char *fmt, ...)
++{
++	va_list ap;
++	va_start(ap, fmt);
++	vfprintf(stderr, fmt, ap);
++	va_end(ap);
++	exit(EXIT_FAILURE);
++}
++
++static void write_file(char *filename, char *fmt, ...)
++{
++	char buf[4096];
++	int fd;
++	ssize_t written;
++	int buf_len;
++	va_list ap;
++
++	va_start(ap, fmt);
++	buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
++	va_end(ap);
++	if (buf_len < 0) {
++		die("vsnprintf failed: %s\n",
++		    strerror(errno));
++	}
++	if (buf_len >= sizeof(buf)) {
++		die("vsnprintf output truncated\n");
++	}
++
++	fd = open(filename, O_WRONLY);
++	if (fd < 0) {
++		die("open of %s failed: %s\n",
++		    filename, strerror(errno));
++	}
++	written = write(fd, buf, buf_len);
++	if (written != buf_len) {
++		if (written >= 0) {
++			die("short write to %s\n", filename);
++		} else {
++			die("write to %s failed: %s\n",
++				filename, strerror(errno));
++		}
++	}
++	if (close(fd) != 0) {
++		die("close of %s failed: %s\n",
++			filename, strerror(errno));
++	}
++}
++
++static void create_and_enter_userns(void)
++{
++	uid_t uid;
++	gid_t gid;
++
++	uid = getuid();
++	gid = getgid();
++
++	if (unshare(CLONE_NEWUSER) !=0) {
++		die("unshare(CLONE_NEWUSER) failed: %s\n",
++			strerror(errno));
++	}
++
++	write_file("/proc/self/uid_map", "0 %d 1", uid);
++	write_file("/proc/self/gid_map", "0 %d 1", gid);
++
++	if (setgroups(0, NULL) != 0) {
++		die("setgroups failed: %s\n",
++			strerror(errno));
++	}
++	if (setgid(0) != 0) {
++		die ("setgid(0) failed %s\n",
++			strerror(errno));
++	}
++	if (setuid(0) != 0) {
++		die("setuid(0) failed %s\n",
++			strerror(errno));
++	}
++}
++
++static
++bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
++{
++	pid_t child;
++
++	child = fork();
++	if (child == -1) {
++		die("fork failed: %s\n",
++			strerror(errno));
++	}
++	if (child != 0) { /* parent */
++		pid_t pid;
++		int status;
++		pid = waitpid(child, &status, 0);
++		if (pid == -1) {
++			die("waitpid failed: %s\n",
++				strerror(errno));
++		}
++		if (pid != child) {
++			die("waited for %d got %d\n",
++				child, pid);
++		}
++		if (!WIFEXITED(status)) {
++			die("child did not terminate cleanly\n");
++		}
++		return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
++	}
++
++	create_and_enter_userns();
++	if (unshare(CLONE_NEWNS) != 0) {
++		die("unshare(CLONE_NEWNS) failed: %s\n",
++			strerror(errno));
++	}
++
++	if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
++		die("mount of /tmp failed: %s\n",
++			strerror(errno));
++	}
++
++	create_and_enter_userns();
++
++	if (unshare(CLONE_NEWNS) != 0) {
++		die("unshare(CLONE_NEWNS) failed: %s\n",
++			strerror(errno));
++	}
++
++	if (mount("/tmp", "/tmp", "none",
++		  MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) {
++		/* system("cat /proc/self/mounts"); */
++		die("remount of /tmp failed: %s\n",
++		    strerror(errno));
++	}
++
++	if (mount("/tmp", "/tmp", "none",
++		  MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) {
++		/* system("cat /proc/self/mounts"); */
++		die("remount of /tmp with invalid flags "
++		    "succeeded unexpectedly\n");
++	}
++	exit(EXIT_SUCCESS);
++}
++
++static bool test_unpriv_remount_simple(int mount_flags)
++{
++	return test_unpriv_remount(mount_flags, mount_flags, 0);
++}
++
++static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
++{
++	return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
++}
++
++int main(int argc, char **argv)
++{
++	if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
++		die("MS_RDONLY malfunctions\n");
++	}
++	if (!test_unpriv_remount_simple(MS_NODEV)) {
++		die("MS_NODEV malfunctions\n");
++	}
++	if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
++		die("MS_NOSUID malfunctions\n");
++	}
++	if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
++		die("MS_NOEXEC malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_STRICTATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
++				       MS_STRICTATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
++				       MS_NOATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
++				       MS_STRICTATIME|MS_NODEV))
++	{
++		die("MS_RELATIME malfunctions\n");
++	}
++	if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
++				 MS_NOATIME|MS_NODEV))
++	{
++		die("Default atime malfunctions\n");
++	}
++	return EXIT_SUCCESS;
++}

