From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <gentoo-commits+bounces-770141-garchives=archives.gentoo.org@lists.gentoo.org>
Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80])
	by finch.gentoo.org (Postfix) with ESMTP id 7D6A7138A1A
	for <garchives@archives.gentoo.org>; Sat,  7 Feb 2015 01:45:48 +0000 (UTC)
Received: from pigeon.gentoo.org (localhost [127.0.0.1])
	by pigeon.gentoo.org (Postfix) with SMTP id 111AAE08E6;
	Sat,  7 Feb 2015 01:45:48 +0000 (UTC)
Received: from smtp.gentoo.org (smtp.gentoo.org [140.211.166.183])
	(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
	(No client certificate requested)
	by pigeon.gentoo.org (Postfix) with ESMTPS id 934D4E08E6
	for <gentoo-commits@lists.gentoo.org>; Sat,  7 Feb 2015 01:45:47 +0000 (UTC)
Received: from oystercatcher.gentoo.org (oystercatcher.gentoo.org [148.251.78.52])
	(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
	(No client certificate requested)
	by smtp.gentoo.org (Postfix) with ESMTPS id 9E93F340720
	for <gentoo-commits@lists.gentoo.org>; Sat,  7 Feb 2015 01:45:46 +0000 (UTC)
Received: from localhost.localdomain (localhost [127.0.0.1])
	by oystercatcher.gentoo.org (Postfix) with ESMTP id E5D721137A
	for <gentoo-commits@lists.gentoo.org>; Sat,  7 Feb 2015 01:45:44 +0000 (UTC)
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano" <mpagano@gentoo.org>
Message-ID: <1423272852.b7bc08ab26f98f81741fbfcfb0219e28e17f88ba.mpagano@gentoo>
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1067_linux-3.10.68.patch
X-VCS-Directories: /
X-VCS-Committer: mpagano
X-VCS-Committer-Name: Mike Pagano
X-VCS-Revision: b7bc08ab26f98f81741fbfcfb0219e28e17f88ba
X-VCS-Branch: 3.10
Date: Sat,  7 Feb 2015 01:45:44 +0000 (UTC)
Precedence: bulk
List-Post: <mailto:gentoo-commits@lists.gentoo.org>
List-Help: <mailto:gentoo-commits+help@lists.gentoo.org>
List-Unsubscribe: <mailto:gentoo-commits+unsubscribe@lists.gentoo.org>
List-Subscribe: <mailto:gentoo-commits+subscribe@lists.gentoo.org>
List-Id: Gentoo Linux mail <gentoo-commits.gentoo.org>
X-BeenThere: gentoo-commits@lists.gentoo.org
X-Archives-Salt: 13770306-7a59-4973-b112-fb85434409ef
X-Archives-Hash: 74446ebe9b43124757004723b10d5cba

commit:     b7bc08ab26f98f81741fbfcfb0219e28e17f88ba
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb  7 01:34:12 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Feb  7 01:34:12 2015 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=b7bc08ab

Linux patch 3.10.68

---
 0000_README              |    4 +
 1067_linux-3.10.68.patch | 2322 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2326 insertions(+)
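
A large share of the ARM hunks below convert atomic64_t's counter (and the
atomic64_* helpers) from u64 to long long. As a minimal user-space sketch of
one plausible motivation — zero tests such as the one behind
atomic64_dec_if_positive() can never observe a negative value when the storage
type is unsigned. Function names here are illustrative only, not code from
this patch:

	#include <stdio.h>

	static long long dec_if_positive_s64(long long v)
	{
		long long r = v - 1;
		return (r < 0) ? v : r;	/* never drops below zero */
	}

	static unsigned long long dec_if_positive_u64(unsigned long long v)
	{
		unsigned long long r = v - 1;
		return (r < 0) ? v : r;	/* (r < 0) is always false: wraps instead */
	}

	int main(void)
	{
		printf("signed:   %lld\n", dec_if_positive_s64(0));	/* 0 */
		printf("unsigned: %llu\n", dec_if_positive_u64(0));	/* 2^64 - 1 */
		return 0;
	}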

diff --git a/0000_README b/0000_README
index f5f4229..0cdf9c0 100644
--- a/0000_README
+++ b/0000_README
@@ -310,6 +310,10 @@ Patch:  1066_linux-3.10.67.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.10.67
 
+Patch:  1067_linux-3.10.68.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.10.68
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-3.10.68.patch b/1067_linux-3.10.68.patch
new file mode 100644
index 0000000..982ee80
--- /dev/null
+++ b/1067_linux-3.10.68.patch
@@ -0,0 +1,2322 @@
+diff --git a/Makefile b/Makefile
+index 7c6711fa3c3f..dd67be657716 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index da1c77d39327..9ee7e01066f9 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ 
+ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ {
+-	unsigned long oldval, res;
++	int oldval;
++	unsigned long res;
+ 
+ 	smp_mb();
+ 
+@@ -238,15 +239,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ 
+ #ifndef CONFIG_GENERIC_ATOMIC64
+ typedef struct {
+-	u64 __aligned(8) counter;
++	long long counter;
+ } atomic64_t;
+ 
+ #define ATOMIC64_INIT(i) { (i) }
+ 
+ #ifdef CONFIG_ARM_LPAE
+-static inline u64 atomic64_read(const atomic64_t *v)
++static inline long long atomic64_read(const atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 
+ 	__asm__ __volatile__("@ atomic64_read\n"
+ "	ldrd	%0, %H0, [%1]"
+@@ -257,7 +258,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline void atomic64_set(atomic64_t *v, u64 i)
++static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ 	__asm__ __volatile__("@ atomic64_set\n"
+ "	strd	%2, %H2, [%1]"
+@@ -266,9 +267,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+ 	);
+ }
+ #else
+-static inline u64 atomic64_read(const atomic64_t *v)
++static inline long long atomic64_read(const atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 
+ 	__asm__ __volatile__("@ atomic64_read\n"
+ "	ldrexd	%0, %H0, [%1]"
+@@ -279,9 +280,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline void atomic64_set(atomic64_t *v, u64 i)
++static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+-	u64 tmp;
++	long long tmp;
+ 
+ 	__asm__ __volatile__("@ atomic64_set\n"
+ "1:	ldrexd	%0, %H0, [%2]\n"
+@@ -294,9 +295,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
+ }
+ #endif
+ 
+-static inline void atomic64_add(u64 i, atomic64_t *v)
++static inline void atomic64_add(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	__asm__ __volatile__("@ atomic64_add\n"
+@@ -311,9 +312,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
+ 	: "cc");
+ }
+ 
+-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
++static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -334,9 +335,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline void atomic64_sub(u64 i, atomic64_t *v)
++static inline void atomic64_sub(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	__asm__ __volatile__("@ atomic64_sub\n"
+@@ -351,9 +352,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
+ 	: "cc");
+ }
+ 
+-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
++static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -374,9 +375,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
++static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
++					long long new)
+ {
+-	u64 oldval;
++	long long oldval;
+ 	unsigned long res;
+ 
+ 	smp_mb();
+@@ -398,9 +400,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+ 	return oldval;
+ }
+ 
+-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
++static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -419,9 +421,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+ 	return result;
+ }
+ 
+-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
++static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+-	u64 result;
++	long long result;
+ 	unsigned long tmp;
+ 
+ 	smp_mb();
+@@ -445,9 +447,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+ 	return result;
+ }
+ 
+-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
++static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ {
+-	u64 val;
++	long long val;
+ 	unsigned long tmp;
+ 	int ret = 1;
+ 
+diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
+index 57870ab313c5..d847cbbcee45 100644
+--- a/arch/arm/include/asm/memory.h
++++ b/arch/arm/include/asm/memory.h
+@@ -98,23 +98,19 @@
+ #define TASK_UNMAPPED_BASE	UL(0x00000000)
+ #endif
+ 
+-#ifndef PHYS_OFFSET
+-#define PHYS_OFFSET 		UL(CONFIG_DRAM_BASE)
+-#endif
+-
+ #ifndef END_MEM
+ #define END_MEM     		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
+ #endif
+ 
+ #ifndef PAGE_OFFSET
+-#define PAGE_OFFSET		(PHYS_OFFSET)
++#define PAGE_OFFSET		PLAT_PHYS_OFFSET
+ #endif
+ 
+ /*
+  * The module can be at any place in ram in nommu mode.
+  */
+ #define MODULES_END		(END_MEM)
+-#define MODULES_VADDR		(PHYS_OFFSET)
++#define MODULES_VADDR		PAGE_OFFSET
+ 
+ #define XIP_VIRT_ADDR(physaddr)  (physaddr)
+ 
+@@ -141,6 +137,16 @@
+ #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
+ #define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
+ 
++/*
++ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
++ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
++ * have their own mach/memory.h.  Assembly code must always use
++ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
++ */
++#ifndef PLAT_PHYS_OFFSET
++#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
++#endif
++
+ #ifndef __ASSEMBLY__
+ 
+ /*
+@@ -183,22 +189,15 @@ static inline unsigned long __phys_to_virt(unsigned long x)
+ 	return t;
+ }
+ #else
++
++#define PHYS_OFFSET	PLAT_PHYS_OFFSET
++
+ #define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
+ #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+-#endif
+-#endif
+-#endif /* __ASSEMBLY__ */
+ 
+-#ifndef PHYS_OFFSET
+-#ifdef PLAT_PHYS_OFFSET
+-#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+-#else
+-#define PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
+ #endif
+ #endif
+ 
+-#ifndef __ASSEMBLY__
+-
+ /*
+  * PFNs are used to describe any physical page; this means
+  * PFN 0 == physical address 0.
+@@ -207,7 +206,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
+  * direct-mapped view.  We assume this is the first page
+  * of RAM in the mem_map as well.
+  */
+-#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
++#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+ 
+ /*
+  * These are *only* valid on the kernel direct mapped RAM memory.
+@@ -275,7 +274,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
+ #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
+ 
+ #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+-#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
++#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
++					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+ 
+ #endif
+ 
+diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
+index 0d3a28dbc8e5..ed690c49ef93 100644
+--- a/arch/arm/include/asm/module.h
++++ b/arch/arm/include/asm/module.h
+@@ -12,6 +12,8 @@ enum {
+ 	ARM_SEC_CORE,
+ 	ARM_SEC_EXIT,
+ 	ARM_SEC_DEVEXIT,
++	ARM_SEC_HOT,
++	ARM_SEC_UNLIKELY,
+ 	ARM_SEC_MAX,
+ };
+ 
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index cbdc7a21f869..4355f0ec44d6 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -13,7 +13,7 @@
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT		12
+ #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
+-#define PAGE_MASK		(~(PAGE_SIZE-1))
++#define PAGE_MASK		(~((1 << PAGE_SHIFT) - 1))
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
+index 18f5cef82ad5..add785b1ec0a 100644
+--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
+@@ -68,6 +68,7 @@
+ #define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
+ #define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
+ #define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
++#define PTE_AP2			(_AT(pteval_t, 1) << 7)		/* AP[2] */
+ #define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+ #define PTE_EXT_AF		(_AT(pteval_t, 1) << 10)	/* Access Flag */
+ #define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* nG */
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 86b8fe398b95..bb017328c5bd 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -33,7 +33,7 @@
+ #define PTRS_PER_PMD		512
+ #define PTRS_PER_PGD		4
+ 
+-#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
++#define PTE_HWTABLE_PTRS	(0)
+ #define PTE_HWTABLE_OFF		(0)
+ #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
+ 
+@@ -48,16 +48,16 @@
+ #define PMD_SHIFT		21
+ 
+ #define PMD_SIZE		(1UL << PMD_SHIFT)
+-#define PMD_MASK		(~(PMD_SIZE-1))
++#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
+ #define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
++#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
+ 
+ /*
+  * section address mask and size definitions.
+  */
+ #define SECTION_SHIFT		21
+ #define SECTION_SIZE		(1UL << SECTION_SHIFT)
+-#define SECTION_MASK		(~(SECTION_SIZE-1))
++#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))
+ 
+ #define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
+ 
+@@ -71,13 +71,13 @@
+ #define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
+ #define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
+ #define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
+-#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
+ #define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
+ #define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+-#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
+-#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
++#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
++#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+ #define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
++#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 58)	/* READ ONLY */
+ 
+ /*
+  * To be used in assembly code with the upper page attributes.
+@@ -166,6 +166,23 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ 		clean_pmd_entry(pmdp);	\
+ 	} while (0)
+ 
++/*
++ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
++ * that are written to a page table but not for ptes created with mk_pte.
++ *
++ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
++ * hugetlb_cow, where it is compared with an entry in a page table.
++ * This comparison test fails erroneously leading ultimately to a memory leak.
++ *
++ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
++ * present before running the comparison.
++ */
++#define __HAVE_ARCH_PTE_SAME
++#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
++					: pte_val(pte_a))				\
++				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
++					: pte_val(pte_b)))
++
+ #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+ 
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 5aac06fcc97e..4043d7f4bc92 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -211,12 +211,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ 
+ #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
+ 
++#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
++						: !!(pte_val(pte) & (val)))
++#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
++
+ #define pte_none(pte)		(!pte_val(pte))
+-#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+-#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
+-#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+-#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+-#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
++#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
++#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
++#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
++#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
++#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))
+ #define pte_special(pte)	(0)
+ 
+ #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 8bac553fe213..f935b5f651f0 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -109,7 +109,7 @@ ENTRY(stext)
+ 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
+ 	add	r8, r8, r4			@ PHYS_OFFSET
+ #else
+-	ldr	r8, =PHYS_OFFSET		@ always constant in this case
++	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
+ #endif
+ 
+ 	/*
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index 1e9be5d25e56..af60478f54d0 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -296,6 +296,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ 			maps[ARM_SEC_EXIT].unw_sec = s;
+ 		else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
+ 			maps[ARM_SEC_DEVEXIT].unw_sec = s;
++		else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
++			maps[ARM_SEC_UNLIKELY].unw_sec = s;
++		else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
++			maps[ARM_SEC_HOT].unw_sec = s;
+ 		else if (strcmp(".init.text", secname) == 0)
+ 			maps[ARM_SEC_INIT].txt_sec = s;
+ 		else if (strcmp(".devinit.text", secname) == 0)
+@@ -306,6 +310,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ 			maps[ARM_SEC_EXIT].txt_sec = s;
+ 		else if (strcmp(".devexit.text", secname) == 0)
+ 			maps[ARM_SEC_DEVEXIT].txt_sec = s;
++		else if (strcmp(".text.unlikely", secname) == 0)
++			maps[ARM_SEC_UNLIKELY].txt_sec = s;
++		else if (strcmp(".text.hot", secname) == 0)
++			maps[ARM_SEC_HOT].txt_sec = s;
+ 	}
+ 
+ 	for (i = 0; i < ARM_SEC_MAX; i++)
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 6c9d7054d997..8ca636cf8618 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -429,12 +429,21 @@ void __init dma_contiguous_remap(void)
+ 		map.type = MT_MEMORY_DMA_READY;
+ 
+ 		/*
+-		 * Clear previous low-memory mapping
++		 * Clear previous low-memory mapping to ensure that the
++		 * TLB does not see any conflicting entries, then flush
++		 * the TLB of the old entries before creating new mappings.
++		 *
++		 * This ensures that any speculatively loaded TLB entries
++		 * (even though they may be rare) can not cause any problems,
++		 * and ensures that this code is architecturally compliant.
+ 		 */
+ 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ 		     addr += PMD_SIZE)
+ 			pmd_clear(pmd_off_k(addr));
+ 
++		flush_tlb_kernel_range(__phys_to_virt(start),
++				       __phys_to_virt(end));
++
+ 		iotable_init(&map, 1);
+ 	}
+ }
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index fb3c446af9e5..4c7d5cddef35 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -685,7 +685,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ }
+ 
+ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+-	unsigned long end, unsigned long phys, const struct mem_type *type)
++				  unsigned long end, phys_addr_t phys,
++				  const struct mem_type *type)
+ {
+ 	pud_t *pud = pud_offset(pgd, addr);
+ 	unsigned long next;
+diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
+index 6f3b0476b729..110e738bc970 100644
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -78,8 +78,13 @@ ENTRY(cpu_v7_set_pte_ext)
+ 	tst	rh, #1 << (57 - 32)		@ L_PTE_NONE
+ 	bicne	rl, #L_PTE_VALID
+ 	bne	1f
+-	tst	rh, #1 << (55 - 32)		@ L_PTE_DIRTY
+-	orreq	rl, #L_PTE_RDONLY
++
++	eor	ip, rh, #1 << (55 - 32)	@ toggle L_PTE_DIRTY in temp reg to
++					@ test for !L_PTE_DIRTY || L_PTE_RDONLY
++	tst	ip, #1 << (55 - 32) | 1 << (58 - 32)
++	orrne	rl, #PTE_AP2
++	biceq	rl, #PTE_AP2
++
+ 1:	strd	r2, r3, [r0]
+ 	ALT_SMP(W(nop))
+ 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 94599a65cc66..89e57280d2e2 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -288,6 +288,7 @@ static inline void disable_surveillance(void)
+ 	args.token = rtas_token("set-indicator");
+ 	if (args.token == RTAS_UNKNOWN_SERVICE)
+ 		return;
++	args.token = cpu_to_be32(args.token);
+ 	args.nargs = cpu_to_be32(3);
+ 	args.nret = cpu_to_be32(1);
+ 	args.rets = &args.args[3];
+diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
+index 202d2c85ba2e..9b2622e0a07e 100644
+--- a/drivers/firmware/efi/efi-pstore.c
++++ b/drivers/firmware/efi/efi-pstore.c
+@@ -38,6 +38,12 @@ struct pstore_read_data {
+ 	char **buf;
+ };
+ 
++static inline u64 generic_id(unsigned long timestamp,
++			     unsigned int part, int count)
++{
++	return (timestamp * 100 + part) * 1000 + count;
++}
++
+ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ {
+ 	efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+@@ -56,7 +62,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ 
+ 	if (sscanf(name, "dump-type%u-%u-%d-%lu",
+ 		   cb_data->type, &part, &cnt, &time) == 4) {
+-		*cb_data->id = part;
++		*cb_data->id = generic_id(time, part, cnt);
+ 		*cb_data->count = cnt;
+ 		cb_data->timespec->tv_sec = time;
+ 		cb_data->timespec->tv_nsec = 0;
+@@ -67,7 +73,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
+ 		 * which doesn't support holding
+ 		 * multiple logs, remains.
+ 		 */
+-		*cb_data->id = part;
++		*cb_data->id = generic_id(time, part, 0);
+ 		*cb_data->count = 0;
+ 		cb_data->timespec->tv_sec = time;
+ 		cb_data->timespec->tv_nsec = 0;
+@@ -185,14 +191,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
+ 	char name[DUMP_NAME_LEN];
+ 	efi_char16_t efi_name[DUMP_NAME_LEN];
+ 	int found, i;
++	unsigned int part;
+ 
+-	sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
+-		time.tv_sec);
++	do_div(id, 1000);
++	part = do_div(id, 100);
++	sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
+ 
+ 	for (i = 0; i < DUMP_NAME_LEN; i++)
+ 		efi_name[i] = name[i];
+ 
+-	edata.id = id;
++	edata.id = part;
+ 	edata.type = type;
+ 	edata.count = count;
+ 	edata.time = time;
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index af909a20dd70..74769724c94a 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+ 		 * Return true to stop looking and return the translation
+ 		 * error via out_gpio
+ 		 */
+-		gg_data->out_gpio = ERR_PTR(ret);
++		gg_data->out_gpio = ret;
+ 		return true;
+ 	 }
+ 
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 5d4a4583d2df..8019e642d2f5 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -40,8 +40,15 @@ static DEFINE_MUTEX(device_list_mutex);
+ static LIST_HEAD(device_list);
+ static struct workqueue_struct *isert_rx_wq;
+ static struct workqueue_struct *isert_comp_wq;
++static struct workqueue_struct *isert_release_wq;
+ static struct kmem_cache *isert_cmd_cache;
+ 
++static int
++isert_rdma_post_recvl(struct isert_conn *isert_conn);
++static int
++isert_rdma_accept(struct isert_conn *isert_conn);
++struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
++
+ static void
+ isert_qp_event_callback(struct ib_event *e, void *context)
+ {
+@@ -107,9 +114,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+ 	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+ 	/*
+ 	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
+-	 * work-around for RDMA_READ..
++	 * work-around for RDMA_READs with ConnectX-2.
++	 *
++	 * Also, still make sure to have at least two SGEs for
++	 * outgoing control PDU responses.
+ 	 */
+-	attr.cap.max_send_sge = devattr.max_sge - 2;
++	attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
+ 	isert_conn->max_sge = attr.cap.max_send_sge;
+ 
+ 	attr.cap.max_recv_sge = 1;
+@@ -124,12 +134,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+ 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+ 	if (ret) {
+ 		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+-		return ret;
++		goto err;
+ 	}
+ 	isert_conn->conn_qp = cma_id->qp;
+ 	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+ 
+ 	return 0;
++err:
++	mutex_lock(&device_list_mutex);
++	device->cq_active_qps[min_index]--;
++	mutex_unlock(&device_list_mutex);
++
++	return ret;
+ }
+ 
+ static void
+@@ -212,6 +228,13 @@ isert_create_device_ib_res(struct isert_device *device)
+ 	struct ib_device *ib_dev = device->ib_device;
+ 	struct isert_cq_desc *cq_desc;
+ 	int ret = 0, i, j;
++	int max_rx_cqe, max_tx_cqe;
++	struct ib_device_attr dev_attr;
++
++	memset(&dev_attr, 0, sizeof(struct ib_device_attr));
++	ret = isert_query_device(device->ib_device, &dev_attr);
++	if (ret)
++		return ret;
+ 
+ 	device->cqs_used = min_t(int, num_online_cpus(),
+ 				 device->ib_device->num_comp_vectors);
+@@ -234,6 +257,9 @@ isert_create_device_ib_res(struct isert_device *device)
+ 		goto out_cq_desc;
+ 	}
+ 
++	max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
++	max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);
++
+ 	for (i = 0; i < device->cqs_used; i++) {
+ 		cq_desc[i].device = device;
+ 		cq_desc[i].cq_index = i;
+@@ -242,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device)
+ 						isert_cq_rx_callback,
+ 						isert_cq_event_callback,
+ 						(void *)&cq_desc[i],
+-						ISER_MAX_RX_CQ_LEN, i);
++						max_rx_cqe, i);
+ 		if (IS_ERR(device->dev_rx_cq[i])) {
+ 			ret = PTR_ERR(device->dev_rx_cq[i]);
+ 			device->dev_rx_cq[i] = NULL;
+@@ -253,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
+ 						isert_cq_tx_callback,
+ 						isert_cq_event_callback,
+ 						(void *)&cq_desc[i],
+-						ISER_MAX_TX_CQ_LEN, i);
++						max_tx_cqe, i);
+ 		if (IS_ERR(device->dev_tx_cq[i])) {
+ 			ret = PTR_ERR(device->dev_tx_cq[i]);
+ 			device->dev_tx_cq[i] = NULL;
+@@ -375,8 +401,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
+ static int
+ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+-	struct iscsi_np *np = cma_id->context;
+-	struct isert_np *isert_np = np->np_context;
++	struct isert_np *isert_np = cma_id->context;
++	struct iscsi_np *np = isert_np->np;
+ 	struct isert_conn *isert_conn;
+ 	struct isert_device *device;
+ 	struct ib_device *ib_dev = cma_id->device;
+@@ -401,12 +427,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	isert_conn->state = ISER_CONN_INIT;
+ 	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+ 	init_completion(&isert_conn->conn_login_comp);
++	init_completion(&isert_conn->login_req_comp);
+ 	init_completion(&isert_conn->conn_wait);
+ 	init_completion(&isert_conn->conn_wait_comp_err);
+ 	kref_init(&isert_conn->conn_kref);
+ 	mutex_init(&isert_conn->conn_mutex);
+ 
+-	cma_id->context = isert_conn;
+ 	isert_conn->conn_cm_id = cma_id;
+ 	isert_conn->responder_resources = event->param.conn.responder_resources;
+ 	isert_conn->initiator_depth = event->param.conn.initiator_depth;
+@@ -466,6 +492,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	if (ret)
+ 		goto out_conn_dev;
+ 
++	ret = isert_rdma_post_recvl(isert_conn);
++	if (ret)
++		goto out_conn_dev;
++
++	ret = isert_rdma_accept(isert_conn);
++	if (ret)
++		goto out_conn_dev;
++
+ 	mutex_lock(&isert_np->np_accept_mutex);
+ 	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ 	mutex_unlock(&isert_np->np_accept_mutex);
+@@ -486,6 +520,7 @@ out_login_buf:
+ 	kfree(isert_conn->login_buf);
+ out:
+ 	kfree(isert_conn);
++	rdma_reject(cma_id, NULL, 0);
+ 	return ret;
+ }
+ 
+@@ -498,18 +533,20 @@ isert_connect_release(struct isert_conn *isert_conn)
+ 
+ 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+ 
++	isert_free_rx_descriptors(isert_conn);
++	rdma_destroy_id(isert_conn->conn_cm_id);
++
+ 	if (isert_conn->conn_qp) {
+ 		cq_index = ((struct isert_cq_desc *)
+ 			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+ 		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
++		mutex_lock(&device_list_mutex);
+ 		isert_conn->conn_device->cq_active_qps[cq_index]--;
++		mutex_unlock(&device_list_mutex);
+ 
+-		rdma_destroy_qp(isert_conn->conn_cm_id);
++		ib_destroy_qp(isert_conn->conn_qp);
+ 	}
+ 
+-	isert_free_rx_descriptors(isert_conn);
+-	rdma_destroy_id(isert_conn->conn_cm_id);
+-
+ 	if (isert_conn->login_buf) {
+ 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+@@ -529,9 +566,19 @@ isert_connect_release(struct isert_conn *isert_conn)
+ static void
+ isert_connected_handler(struct rdma_cm_id *cma_id)
+ {
+-	struct isert_conn *isert_conn = cma_id->context;
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
++
++	pr_info("conn %p\n", isert_conn);
+ 
+-	kref_get(&isert_conn->conn_kref);
++	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
++		pr_warn("conn %p connect_release is running\n", isert_conn);
++		return;
++	}
++
++	mutex_lock(&isert_conn->conn_mutex);
++	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
++		isert_conn->state = ISER_CONN_UP;
++	mutex_unlock(&isert_conn->conn_mutex);
+ }
+ 
+ static void
+@@ -552,65 +599,108 @@ isert_put_conn(struct isert_conn *isert_conn)
+ 	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ }
+ 
++/**
++ * isert_conn_terminate() - Initiate connection termination
++ * @isert_conn: isert connection struct
++ *
++ * Notes:
++ * In case the connection state is FULL_FEATURE, move state
++ * to TERMINATING and start teardown sequence (rdma_disconnect).
++ * In case the connection state is UP, complete flush as well.
++ *
++ * This routine must be called with conn_mutex held. Thus it is
++ * safe to call multiple times.
++ */
+ static void
+-isert_disconnect_work(struct work_struct *work)
++isert_conn_terminate(struct isert_conn *isert_conn)
+ {
+-	struct isert_conn *isert_conn = container_of(work,
+-				struct isert_conn, conn_logout_work);
++	int err;
+ 
+-	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+-	mutex_lock(&isert_conn->conn_mutex);
+-	if (isert_conn->state == ISER_CONN_UP)
++	switch (isert_conn->state) {
++	case ISER_CONN_TERMINATING:
++		break;
++	case ISER_CONN_UP:
++		/*
++		 * No flush completions will occur as we didn't
++		 * get to ISER_CONN_FULL_FEATURE yet, complete
++		 * to allow teardown progress.
++		 */
++		complete(&isert_conn->conn_wait_comp_err);
++	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
++		pr_info("Terminating conn %p state %d\n",
++			   isert_conn, isert_conn->state);
+ 		isert_conn->state = ISER_CONN_TERMINATING;
+-
+-	if (isert_conn->post_recv_buf_count == 0 &&
+-	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+-		mutex_unlock(&isert_conn->conn_mutex);
+-		goto wake_up;
+-	}
+-	if (!isert_conn->conn_cm_id) {
+-		mutex_unlock(&isert_conn->conn_mutex);
+-		isert_put_conn(isert_conn);
+-		return;
++		err = rdma_disconnect(isert_conn->conn_cm_id);
++		if (err)
++			pr_warn("Failed rdma_disconnect isert_conn %p\n",
++				   isert_conn);
++		break;
++	default:
++		pr_warn("conn %p teminating in state %d\n",
++			   isert_conn, isert_conn->state);
+ 	}
++}
+ 
+-	if (isert_conn->disconnect) {
+-		/* Send DREQ/DREP towards our initiator */
+-		rdma_disconnect(isert_conn->conn_cm_id);
+-	}
++static int
++isert_np_cma_handler(struct isert_np *isert_np,
++		     enum rdma_cm_event_type event)
++{
++	pr_debug("isert np %p, handling event %d\n", isert_np, event);
+ 
+-	mutex_unlock(&isert_conn->conn_mutex);
++	switch (event) {
++	case RDMA_CM_EVENT_DEVICE_REMOVAL:
++		isert_np->np_cm_id = NULL;
++		break;
++	case RDMA_CM_EVENT_ADDR_CHANGE:
++		isert_np->np_cm_id = isert_setup_id(isert_np);
++		if (IS_ERR(isert_np->np_cm_id)) {
++			pr_err("isert np %p setup id failed: %ld\n",
++				 isert_np, PTR_ERR(isert_np->np_cm_id));
++			isert_np->np_cm_id = NULL;
++		}
++		break;
++	default:
++		pr_err("isert np %p Unexpected event %d\n",
++			  isert_np, event);
++	}
+ 
+-wake_up:
+-	complete(&isert_conn->conn_wait);
++	return -1;
+ }
+ 
+ static int
+-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
++isert_disconnected_handler(struct rdma_cm_id *cma_id,
++			   enum rdma_cm_event_type event)
+ {
++	struct isert_np *isert_np = cma_id->context;
+ 	struct isert_conn *isert_conn;
+ 
+-	if (!cma_id->qp) {
+-		struct isert_np *isert_np = cma_id->context;
++	if (isert_np->np_cm_id == cma_id)
++		return isert_np_cma_handler(cma_id->context, event);
+ 
+-		isert_np->np_cm_id = NULL;
+-		return -1;
+-	}
++	isert_conn = cma_id->qp->qp_context;
+ 
+-	isert_conn = (struct isert_conn *)cma_id->context;
++	mutex_lock(&isert_conn->conn_mutex);
++	isert_conn_terminate(isert_conn);
++	mutex_unlock(&isert_conn->conn_mutex);
+ 
+-	isert_conn->disconnect = disconnect;
+-	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+-	schedule_work(&isert_conn->conn_logout_work);
++	pr_info("conn %p completing conn_wait\n", isert_conn);
++	complete(&isert_conn->conn_wait);
+ 
+ 	return 0;
+ }
+ 
++static void
++isert_connect_error(struct rdma_cm_id *cma_id)
++{
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
++
++	isert_put_conn(isert_conn);
++}
++
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
+ 	int ret = 0;
+-	bool disconnect = false;
+ 
+ 	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+ 		 event->event, event->status, cma_id->context, cma_id);
+@@ -628,11 +718,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ 	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+ 	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+ 	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+-		disconnect = true;
+ 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+-		ret = isert_disconnected_handler(cma_id, disconnect);
++		ret = isert_disconnected_handler(cma_id, event->event);
+ 		break;
++	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
++	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
+ 	case RDMA_CM_EVENT_CONNECT_ERROR:
++		isert_connect_error(cma_id);
++		break;
+ 	default:
+ 		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+ 		break;
+@@ -834,7 +927,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ 			if (ret)
+ 				return ret;
+ 
+-			isert_conn->state = ISER_CONN_UP;
++			/* Now we are in FULL_FEATURE phase */
++			mutex_lock(&isert_conn->conn_mutex);
++			isert_conn->state = ISER_CONN_FULL_FEATURE;
++			mutex_unlock(&isert_conn->conn_mutex);
+ 			goto post_send;
+ 		}
+ 
+@@ -851,18 +947,17 @@ post_send:
+ }
+ 
+ static void
+-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+-		   struct isert_conn *isert_conn)
++isert_rx_login_req(struct isert_conn *isert_conn)
+ {
++	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
++	int rx_buflen = isert_conn->login_req_len;
+ 	struct iscsi_conn *conn = isert_conn->conn;
+ 	struct iscsi_login *login = conn->conn_login;
+ 	int size;
+ 
+-	if (!login) {
+-		pr_err("conn->conn_login is NULL\n");
+-		dump_stack();
+-		return;
+-	}
++	pr_info("conn %p\n", isert_conn);
++
++	WARN_ON_ONCE(!login);
+ 
+ 	if (login->first_request) {
+ 		struct iscsi_login_req *login_req =
+@@ -892,7 +987,8 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+ 		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+ 	memcpy(login->req_buf, &rx_desc->data[0], size);
+ 
+-	complete(&isert_conn->conn_login_comp);
++	if (login->first_request)
++		complete(&isert_conn->conn_login_comp);
+ }
+ 
+ static void
+@@ -1169,11 +1265,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
+ 		 hdr->opcode, hdr->itt, hdr->flags,
+ 		 (int)(xfer_len - ISER_HEADERS_LEN));
+ 
+-	if ((char *)desc == isert_conn->login_req_buf)
+-		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+-				   isert_conn);
+-	else
++	if ((char *)desc == isert_conn->login_req_buf) {
++		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
++		if (isert_conn->conn) {
++			struct iscsi_login *login = isert_conn->conn->conn_login;
++
++			if (login && !login->first_request)
++				isert_rx_login_req(isert_conn);
++		}
++		mutex_lock(&isert_conn->conn_mutex);
++		complete(&isert_conn->login_req_comp);
++		mutex_unlock(&isert_conn->conn_mutex);
++	} else {
+ 		isert_rx_do_work(desc, isert_conn);
++	}
+ 
+ 	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+ 				      DMA_FROM_DEVICE);
+@@ -1483,7 +1588,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+ 		msleep(3000);
+ 
+ 	mutex_lock(&isert_conn->conn_mutex);
+-	isert_conn->state = ISER_CONN_DOWN;
++	isert_conn_terminate(isert_conn);
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ 	iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+@@ -2044,13 +2149,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+ 	return ret;
+ }
+ 
++struct rdma_cm_id *
++isert_setup_id(struct isert_np *isert_np)
++{
++	struct iscsi_np *np = isert_np->np;
++	struct rdma_cm_id *id;
++	struct sockaddr *sa;
++	int ret;
++
++	sa = (struct sockaddr *)&np->np_sockaddr;
++	pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
++
++	id = rdma_create_id(isert_cma_handler, isert_np,
++			    RDMA_PS_TCP, IB_QPT_RC);
++	if (IS_ERR(id)) {
++		pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
++		ret = PTR_ERR(id);
++		goto out;
++	}
++	pr_debug("id %p context %p\n", id, id->context);
++
++	ret = rdma_bind_addr(id, sa);
++	if (ret) {
++		pr_err("rdma_bind_addr() failed: %d\n", ret);
++		goto out_id;
++	}
++
++	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
++	if (ret) {
++		pr_err("rdma_listen() failed: %d\n", ret);
++		goto out_id;
++	}
++
++	return id;
++out_id:
++	rdma_destroy_id(id);
++out:
++	return ERR_PTR(ret);
++}
++
+ static int
+ isert_setup_np(struct iscsi_np *np,
+ 	       struct __kernel_sockaddr_storage *ksockaddr)
+ {
+ 	struct isert_np *isert_np;
+ 	struct rdma_cm_id *isert_lid;
+-	struct sockaddr *sa;
+ 	int ret;
+ 
+ 	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+@@ -2062,9 +2205,8 @@ isert_setup_np(struct iscsi_np *np,
+ 	mutex_init(&isert_np->np_accept_mutex);
+ 	INIT_LIST_HEAD(&isert_np->np_accept_list);
+ 	init_completion(&isert_np->np_login_comp);
++	isert_np->np = np;
+ 
+-	sa = (struct sockaddr *)ksockaddr;
+-	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+ 	/*
+ 	 * Setup the np->np_sockaddr from the passed sockaddr setup
+ 	 * in iscsi_target_configfs.c code..
+@@ -2072,37 +2214,20 @@ isert_setup_np(struct iscsi_np *np,
+ 	memcpy(&np->np_sockaddr, ksockaddr,
+ 	       sizeof(struct __kernel_sockaddr_storage));
+ 
+-	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+-				IB_QPT_RC);
++	isert_lid = isert_setup_id(isert_np);
+ 	if (IS_ERR(isert_lid)) {
+-		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+-		       PTR_ERR(isert_lid));
+ 		ret = PTR_ERR(isert_lid);
+ 		goto out;
+ 	}
+ 
+-	ret = rdma_bind_addr(isert_lid, sa);
+-	if (ret) {
+-		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+-		goto out_lid;
+-	}
+-
+-	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+-	if (ret) {
+-		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+-		goto out_lid;
+-	}
+-
+ 	isert_np->np_cm_id = isert_lid;
+ 	np->np_context = isert_np;
+-	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+ 
+ 	return 0;
+ 
+-out_lid:
+-	rdma_destroy_id(isert_lid);
+ out:
+ 	kfree(isert_np);
++
+ 	return ret;
+ }
+ 
+@@ -2138,13 +2263,27 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+ 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ 	int ret;
+ 
+-	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
++	pr_info("before login_req comp conn: %p\n", isert_conn);
++	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
++	if (ret) {
++		pr_err("isert_conn %p interrupted before got login req\n",
++			isert_conn);
++		return ret;
++	}
++	isert_conn->login_req_comp.done = 0;
++
++	if (!login->first_request)
++		return 0;
++
++	isert_rx_login_req(isert_conn);
++
++	pr_info("before conn_login_comp conn: %p\n", conn);
+ 
+ 	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+ 	if (ret)
+ 		return ret;
+ 
+-	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
++	pr_info("processing login->req: %p\n", login->req);
+ 	return 0;
+ }
+ 
+@@ -2222,17 +2361,10 @@ accept_wait:
+ 	isert_conn->conn = conn;
+ 	max_accept = 0;
+ 
+-	ret = isert_rdma_post_recvl(isert_conn);
+-	if (ret)
+-		return ret;
+-
+-	ret = isert_rdma_accept(isert_conn);
+-	if (ret)
+-		return ret;
+-
+ 	isert_set_conn_info(np, conn, isert_conn);
+ 
+-	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
++	pr_debug("Processing isert_conn: %p\n", isert_conn);
++
+ 	return 0;
+ }
+ 
+@@ -2248,6 +2380,24 @@ isert_free_np(struct iscsi_np *np)
+ 	kfree(isert_np);
+ }
+ 
++static void isert_release_work(struct work_struct *work)
++{
++	struct isert_conn *isert_conn = container_of(work,
++						     struct isert_conn,
++						     release_work);
++
++	pr_info("Starting release conn %p\n", isert_conn);
++
++	wait_for_completion(&isert_conn->conn_wait);
++
++	mutex_lock(&isert_conn->conn_mutex);
++	isert_conn->state = ISER_CONN_DOWN;
++	mutex_unlock(&isert_conn->conn_mutex);
++
++	pr_info("Destroying conn %p\n", isert_conn);
++	isert_put_conn(isert_conn);
++}
++
+ static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+ 	struct isert_conn *isert_conn = conn->context;
+@@ -2255,10 +2405,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 	pr_debug("isert_wait_conn: Starting \n");
+ 
+ 	mutex_lock(&isert_conn->conn_mutex);
+-	if (isert_conn->conn_cm_id) {
+-		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+-		rdma_disconnect(isert_conn->conn_cm_id);
+-	}
+ 	/*
+ 	 * Only wait for conn_wait_comp_err if the isert_conn made it
+ 	 * into full feature phase..
+@@ -2267,14 +2413,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 		mutex_unlock(&isert_conn->conn_mutex);
+ 		return;
+ 	}
+-	if (isert_conn->state == ISER_CONN_UP)
+-		isert_conn->state = ISER_CONN_TERMINATING;
++	isert_conn_terminate(isert_conn);
+ 	mutex_unlock(&isert_conn->conn_mutex);
+ 
+ 	wait_for_completion(&isert_conn->conn_wait_comp_err);
+ 
+-	wait_for_completion(&isert_conn->conn_wait);
+-	isert_put_conn(isert_conn);
++	INIT_WORK(&isert_conn->release_work, isert_release_work);
++	queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ 
+ static void isert_free_conn(struct iscsi_conn *conn)
+@@ -2320,20 +2465,30 @@ static int __init isert_init(void)
+ 		goto destroy_rx_wq;
+ 	}
+ 
++	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
++					WQ_UNBOUND_MAX_ACTIVE);
++	if (!isert_release_wq) {
++		pr_err("Unable to allocate isert_release_wq\n");
++		ret = -ENOMEM;
++		goto destroy_comp_wq;
++	}
++
+ 	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
+ 			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
+ 			0, NULL);
+ 	if (!isert_cmd_cache) {
+ 		pr_err("Unable to create isert_cmd_cache\n");
+ 		ret = -ENOMEM;
+-		goto destroy_tx_cq;
++		goto destroy_release_wq;
+ 	}
+ 
+ 	iscsit_register_transport(&iser_target_transport);
+-	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
++	pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+ 	return 0;
+ 
+-destroy_tx_cq:
++destroy_release_wq:
++	destroy_workqueue(isert_release_wq);
++destroy_comp_wq:
+ 	destroy_workqueue(isert_comp_wq);
+ destroy_rx_wq:
+ 	destroy_workqueue(isert_rx_wq);
+@@ -2344,6 +2499,7 @@ static void __exit isert_exit(void)
+ {
+ 	flush_scheduled_work();
+ 	kmem_cache_destroy(isert_cmd_cache);
++	destroy_workqueue(isert_release_wq);
+ 	destroy_workqueue(isert_comp_wq);
+ 	destroy_workqueue(isert_rx_wq);
+ 	iscsit_unregister_transport(&iser_target_transport);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 032f65abee36..b233ee5e46b0 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -21,6 +21,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+ 	ISER_CONN_INIT,
+ 	ISER_CONN_UP,
++	ISER_CONN_FULL_FEATURE,
+ 	ISER_CONN_TERMINATING,
+ 	ISER_CONN_DOWN,
+ };
+@@ -87,6 +88,7 @@ struct isert_conn {
+ 	char			*login_req_buf;
+ 	char			*login_rsp_buf;
+ 	u64			login_req_dma;
++	int			login_req_len;
+ 	u64			login_rsp_dma;
+ 	unsigned int		conn_rx_desc_head;
+ 	struct iser_rx_desc	*conn_rx_descs;
+@@ -94,18 +96,18 @@ struct isert_conn {
+ 	struct iscsi_conn	*conn;
+ 	struct list_head	conn_accept_node;
+ 	struct completion	conn_login_comp;
++	struct completion	login_req_comp;
+ 	struct iser_tx_desc	conn_login_tx_desc;
+ 	struct rdma_cm_id	*conn_cm_id;
+ 	struct ib_pd		*conn_pd;
+ 	struct ib_mr		*conn_mr;
+ 	struct ib_qp		*conn_qp;
+ 	struct isert_device	*conn_device;
+-	struct work_struct	conn_logout_work;
+ 	struct mutex		conn_mutex;
+ 	struct completion	conn_wait;
+ 	struct completion	conn_wait_comp_err;
+ 	struct kref		conn_kref;
+-	bool			disconnect;
++	struct work_struct	release_work;
+ };
+ 
+ #define ISERT_MAX_CQ 64
+@@ -131,6 +133,7 @@ struct isert_device {
+ };
+ 
+ struct isert_np {
++	struct iscsi_np         *np;
+ 	struct semaphore	np_sem;
+ 	struct rdma_cm_id	*np_cm_id;
+ 	struct mutex		np_accept_mutex;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 78227f32d6fa..4de2571938b8 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Medion Akoya E7225 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
++		},
++	},
++	{
+ 		/* Blue FB5601 */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "blue"),
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index b361ce4ce511..4a10c1562d0f 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -648,7 +648,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ 	if (!cmd) {
+ 		DMERR("could not allocate metadata struct");
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+ 	atomic_set(&cmd->ref_count, 1);
+@@ -710,7 +710,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ 		return cmd;
+ 
+ 	cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+-	if (cmd) {
++	if (!IS_ERR(cmd)) {
+ 		mutex_lock(&table_lock);
+ 		cmd2 = lookup(bdev);
+ 		if (cmd2) {
+@@ -745,9 +745,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ {
+ 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+ 						       may_format_device, policy_hint_size);
+-	if (cmd && !same_params(cmd, data_block_size)) {
++
++	if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+ 		dm_cache_metadata_close(cmd);
+-		return NULL;
++		return ERR_PTR(-EINVAL);
+ 	}
+ 
+ 	return cmd;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 86a2a5e3b26b..39996ca58ce6 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2457,6 +2457,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+ 	struct pool_c *pt = ti->private;
+ 	struct pool *pool = pt->pool;
+ 
++	if (get_pool_mode(pool) >= PM_READ_ONLY) {
++		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
++		      dm_device_name(pool->pool_md));
++		return -EINVAL;
++	}
++
+ 	if (!strcasecmp(argv[0], "create_thin"))
+ 		r = process_create_thin_mesg(argc, argv, pool);
+ 
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 63fb90b006ba..a3fb8b51038a 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -579,7 +579,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+ 			  usb_sndbulkpipe(dev->udev,
+ 					  dev->bulk_out->bEndpointAddress),
+ 			  buf, msg->len,
+-			  kvaser_usb_simple_msg_callback, priv);
++			  kvaser_usb_simple_msg_callback, netdev);
+ 	usb_anchor_urb(urb, &priv->tx_submitted);
+ 
+ 	err = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -654,11 +654,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 	priv = dev->nets[channel];
+ 	stats = &priv->netdev->stats;
+ 
+-	if (status & M16C_STATE_BUS_RESET) {
+-		kvaser_usb_unlink_tx_urbs(priv);
+-		return;
+-	}
+-
+ 	skb = alloc_can_err_skb(priv->netdev, &cf);
+ 	if (!skb) {
+ 		stats->rx_dropped++;
+@@ -669,7 +664,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 
+ 	netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
+ 
+-	if (status & M16C_STATE_BUS_OFF) {
++	if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
+ 		cf->can_id |= CAN_ERR_BUSOFF;
+ 
+ 		priv->can.can_stats.bus_off++;
+@@ -695,9 +690,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 		}
+ 
+ 		new_state = CAN_STATE_ERROR_PASSIVE;
+-	}
+-
+-	if (status == M16C_STATE_BUS_ERROR) {
++	} else if (status & M16C_STATE_BUS_ERROR) {
+ 		if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
+ 		    ((txerr >= 96) || (rxerr >= 96))) {
+ 			cf->can_id |= CAN_ERR_CRTL;
+@@ -707,7 +700,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
+ 
+ 			priv->can.can_stats.error_warning++;
+ 			new_state = CAN_STATE_ERROR_WARNING;
+-		} else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
++		} else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
++			   ((txerr < 96) && (rxerr < 96))) {
+ 			cf->can_id |= CAN_ERR_PROT;
+ 			cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ 
+@@ -1583,7 +1577,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ {
+ 	struct kvaser_usb *dev;
+ 	int err = -ENOMEM;
+-	int i;
++	int i, retry = 3;
+ 
+ 	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
+ 	if (!dev)
+@@ -1601,7 +1595,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
+ 
+ 	usb_set_intfdata(intf, dev);
+ 
+-	err = kvaser_usb_get_software_info(dev);
++	/* On some x86 laptops, plugging a Kvaser device again after
++	 * an unplug makes the firmware always ignore the very first
++	 * command. For such a case, provide some room for retries
++	 * instead of completely exiting the driver.
++	 */
++	do {
++		err = kvaser_usb_get_software_info(dev);
++	} while (--retry && err == -ETIMEDOUT);
++
+ 	if (err) {
+ 		dev_err(&intf->dev,
+ 			"Cannot get software infos, error %d\n", err);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index b1ab3a4956a5..e18240de159c 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1293,6 +1293,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ 	if (vid == priv->data.default_vlan)
+ 		return 0;
+ 
++	if (priv->data.dual_emac) {
++		/* In dual EMAC, reserved VLAN id should not be used for
++		 * creating VLAN interfaces as this can break the dual
++		 * EMAC port separation
++		 */
++		int i;
++
++		for (i = 0; i < priv->data.slaves; i++) {
++			if (vid == priv->slaves[i].port_vlan)
++				return -EINVAL;
++		}
++	}
++
+ 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ 	return cpsw_add_vlan_ale_entry(priv, vid);
+ }
+@@ -1306,6 +1319,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ 	if (vid == priv->data.default_vlan)
+ 		return 0;
+ 
++	if (priv->data.dual_emac) {
++		int i;
++
++		for (i = 0; i < priv->data.slaves; i++) {
++			if (vid == priv->slaves[i].port_vlan)
++				return -EINVAL;
++		}
++	}
++
+ 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ 	if (ret != 0)
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index a86d12326137..e873e8f0070d 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1410,7 +1410,7 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
+ }
+ EXPORT_SYMBOL_GPL(regulator_get_exclusive);
+ 
+-/* Locks held by regulator_put() */
++/* regulator_list_mutex lock held by regulator_put() */
+ static void _regulator_put(struct regulator *regulator)
+ {
+ 	struct regulator_dev *rdev;
+@@ -1425,12 +1425,14 @@ static void _regulator_put(struct regulator *regulator)
+ 	/* remove any sysfs entries */
+ 	if (regulator->dev)
+ 		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
++	mutex_lock(&rdev->mutex);
+ 	kfree(regulator->supply_name);
+ 	list_del(&regulator->list);
+ 	kfree(regulator);
+ 
+ 	rdev->open_count--;
+ 	rdev->exclusive = 0;
++	mutex_unlock(&rdev->mutex);
+ 
+ 	module_put(rdev->owner);
+ }
+diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
+index 0791c92e8c50..1389fefe8814 100644
+--- a/drivers/spi/spi-dw-mid.c
++++ b/drivers/spi/spi-dw-mid.c
+@@ -222,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
+ 	iounmap(clk_reg);
+ 
+ 	dws->num_cs = 16;
+-	dws->fifo_len = 40;	/* FIFO has 40 words buffer */
+ 
+ #ifdef CONFIG_SPI_DW_MID_DMA
+ 	dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index d26a2d195d21..cc42ee5e19fb 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -393,8 +393,8 @@ static void giveback(struct driver_data *drv_data)
+ 			cs_deassert(drv_data);
+ 	}
+ 
+-	spi_finalize_current_message(drv_data->master);
+ 	drv_data->cur_chip = NULL;
++	spi_finalize_current_message(drv_data->master);
+ }
+ 
+ static void reset_sccr1(struct driver_data *drv_data)
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index e14e105acff8..0493e8b1ba8f 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1360,6 +1360,9 @@ old_sess_out:
+ 		conn->sock = NULL;
+ 	}
+ 
++	if (conn->conn_transport->iscsit_wait_conn)
++		conn->conn_transport->iscsit_wait_conn(conn);
++
+ 	if (conn->conn_transport->iscsit_free_conn)
+ 		conn->conn_transport->iscsit_free_conn(conn);
+ 
+diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
+index 7c908141cc8a..9c02eb41ea90 100644
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
+ 		goto out_done;
+ 	}
+ 
+-	tl_nexus = tl_hba->tl_nexus;
++	tl_nexus = tl_tpg->tl_nexus;
+ 	if (!tl_nexus) {
+ 		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
+ 				" does not exist\n");
+@@ -258,20 +258,20 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+ 	 */
+ 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ 	/*
++	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
++	 */
++	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
++	se_tpg = &tl_tpg->tl_se_tpg;
++	/*
+ 	 * Locate the tl_nexus and se_sess pointers
+ 	 */
+-	tl_nexus = tl_hba->tl_nexus;
++	tl_nexus = tl_tpg->tl_nexus;
+ 	if (!tl_nexus) {
+ 		pr_err("Unable to perform device reset without"
+ 				" active I_T Nexus\n");
+ 		return FAILED;
+ 	}
+ 	se_sess = tl_nexus->se_sess;
+-	/*
+-	 * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+-	 */
+-	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+-	se_tpg = &tl_tpg->tl_se_tpg;
+ 
+ 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
+ 	if (!tl_cmd) {
+@@ -879,8 +879,8 @@ static int tcm_loop_make_nexus(
+ 	struct tcm_loop_nexus *tl_nexus;
+ 	int ret = -ENOMEM;
+ 
+-	if (tl_tpg->tl_hba->tl_nexus) {
+-		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
++	if (tl_tpg->tl_nexus) {
++		pr_debug("tl_tpg->tl_nexus already exists\n");
+ 		return -EEXIST;
+ 	}
+ 	se_tpg = &tl_tpg->tl_se_tpg;
+@@ -915,7 +915,7 @@ static int tcm_loop_make_nexus(
+ 	 */
+ 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
+ 			tl_nexus->se_sess, tl_nexus);
+-	tl_tpg->tl_hba->tl_nexus = tl_nexus;
++	tl_tpg->tl_nexus = tl_nexus;
+ 	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+ 		name);
+@@ -931,9 +931,8 @@ static int tcm_loop_drop_nexus(
+ {
+ 	struct se_session *se_sess;
+ 	struct tcm_loop_nexus *tl_nexus;
+-	struct tcm_loop_hba *tl_hba = tpg->tl_hba;
+ 
+-	tl_nexus = tpg->tl_hba->tl_nexus;
++	tl_nexus = tpg->tl_nexus;
+ 	if (!tl_nexus)
+ 		return -ENODEV;
+ 
+@@ -949,13 +948,13 @@ static int tcm_loop_drop_nexus(
+ 	}
+ 
+ 	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+-		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
++		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
+ 		tl_nexus->se_sess->se_node_acl->initiatorname);
+ 	/*
+ 	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
+ 	 */
+ 	transport_deregister_session(tl_nexus->se_sess);
+-	tpg->tl_hba->tl_nexus = NULL;
++	tpg->tl_nexus = NULL;
+ 	kfree(tl_nexus);
+ 	return 0;
+ }
+@@ -971,7 +970,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
+ 	struct tcm_loop_nexus *tl_nexus;
+ 	ssize_t ret;
+ 
+-	tl_nexus = tl_tpg->tl_hba->tl_nexus;
++	tl_nexus = tl_tpg->tl_nexus;
+ 	if (!tl_nexus)
+ 		return -ENODEV;
+ 
+diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
+index dd7a84ee78e1..4ed85886a1ee 100644
+--- a/drivers/target/loopback/tcm_loop.h
++++ b/drivers/target/loopback/tcm_loop.h
+@@ -25,11 +25,6 @@ struct tcm_loop_tmr {
+ };
+ 
+ struct tcm_loop_nexus {
+-	int it_nexus_active;
+-	/*
+-	 * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
+-	 */
+-	struct scsi_host *sh;
+ 	/*
+ 	 * Pointer to TCM session for I_T Nexus
+ 	 */
+@@ -45,6 +40,7 @@ struct tcm_loop_tpg {
+ 	atomic_t tl_tpg_port_count;
+ 	struct se_portal_group tl_se_tpg;
+ 	struct tcm_loop_hba *tl_hba;
++	struct tcm_loop_nexus *tl_nexus;
+ };
+ 
+ struct tcm_loop_hba {
+@@ -53,7 +49,6 @@ struct tcm_loop_hba {
+ 	struct se_hba_s *se_hba;
+ 	struct se_lun *tl_hba_lun;
+ 	struct se_port *tl_hba_lun_sep;
+-	struct tcm_loop_nexus *tl_nexus;
+ 	struct device dev;
+ 	struct Scsi_Host *sh;
+ 	struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 2be407e22eb4..4deb0c997b1b 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1037,10 +1037,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
+ 				" changed for TCM/pSCSI\n", dev);
+ 		return -EINVAL;
+ 	}
+-	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
++	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
+ 		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+-			" greater than fabric_max_sectors: %u\n", dev,
+-			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
++			" greater than hw_max_sectors: %u\n", dev,
++			optimal_sectors, dev->dev_attrib.hw_max_sectors);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1442,7 +1442,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+ 				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ 	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ 	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+-	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+ 
+ 	return dev;
+ }
+@@ -1475,6 +1474,7 @@ int target_configure_device(struct se_device *dev)
+ 	dev->dev_attrib.hw_max_sectors =
+ 		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+ 					 dev->dev_attrib.hw_block_size);
++	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
+ 
+ 	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+ 	dev->creation_time = get_jiffies_64();
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 3b2879316b87..8baaa0a26d70 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -554,7 +554,16 @@ fd_execute_rw(struct se_cmd *cmd)
+ 	enum dma_data_direction data_direction = cmd->data_direction;
+ 	struct se_device *dev = cmd->se_dev;
+ 	int ret = 0;
+-
++	/*
++	 * We are currently limited by the number of iovecs (2048) per
++	 * single vfs_[writev,readv] call.
++	 */
++	if (cmd->data_length > FD_MAX_BYTES) {
++		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
++		       "FD_MAX_BYTES: %u iovec count limitiation\n",
++			cmd->data_length, FD_MAX_BYTES);
++		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++	}
+ 	/*
+ 	 * Call vectorized fileio functions to map struct scatterlist
+ 	 * physical memory addresses to struct iovec virtual memory.
+diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
+index aa1620abec6d..b358b3d6c201 100644
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -122,7 +122,7 @@ static int iblock_configure_device(struct se_device *dev)
+ 	q = bdev_get_queue(bd);
+ 
+ 	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+-	dev->dev_attrib.hw_max_sectors = UINT_MAX;
++	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+ 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
+ 
+ 	/*
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 0ef75fb0ecba..92e6c510e5d0 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -561,21 +561,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ 		unsigned long long end_lba;
+ 
+-		if (sectors > dev->dev_attrib.fabric_max_sectors) {
+-			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+-				" big sectors %u exceeds fabric_max_sectors:"
+-				" %u\n", cdb[0], sectors,
+-				dev->dev_attrib.fabric_max_sectors);
+-			return TCM_INVALID_CDB_FIELD;
+-		}
+-		if (sectors > dev->dev_attrib.hw_max_sectors) {
+-			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+-				" big sectors %u exceeds backend hw_max_sectors:"
+-				" %u\n", cdb[0], sectors,
+-				dev->dev_attrib.hw_max_sectors);
+-			return TCM_INVALID_CDB_FIELD;
+-		}
+-
+ 		end_lba = dev->transport->get_blocks(dev) + 1;
+ 		if (cmd->t_task_lba + sectors > end_lba) {
+ 			pr_err("cmd exceeds last lba %llu "
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 34254b2ec466..9998ae23cc7c 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -444,7 +444,6 @@ static sense_reason_t
+ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+-	u32 max_sectors;
+ 	int have_tp = 0;
+ 
+ 	/*
+@@ -469,9 +468,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+ 	/*
+ 	 * Set MAXIMUM TRANSFER LENGTH
+ 	 */
+-	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+-			  dev->dev_attrib.hw_max_sectors);
+-	put_unaligned_be32(max_sectors, &buf[8]);
++	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
+ 
+ 	/*
+ 	 * Set OPTIMAL TRANSFER LENGTH
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 962c7e3c3baa..fb97bc0b80e7 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -820,6 +820,23 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+ 	return 0;
+ }
+ 
++static int vhost_scsi_to_tcm_attr(int attr)
++{
++	switch (attr) {
++	case VIRTIO_SCSI_S_SIMPLE:
++		return MSG_SIMPLE_TAG;
++	case VIRTIO_SCSI_S_ORDERED:
++		return MSG_ORDERED_TAG;
++	case VIRTIO_SCSI_S_HEAD:
++		return MSG_HEAD_TAG;
++	case VIRTIO_SCSI_S_ACA:
++		return MSG_ACA_TAG;
++	default:
++		break;
++	}
++	return MSG_SIMPLE_TAG;
++}
++
+ static void tcm_vhost_submission_work(struct work_struct *work)
+ {
+ 	struct tcm_vhost_cmd *tv_cmd =
+@@ -846,9 +863,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
+ 	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
+ 			tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
+ 			tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
+-			tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
+-			0, sg_ptr, tv_cmd->tvc_sgl_count,
+-			sg_bidi_ptr, sg_no_bidi);
++			vhost_scsi_to_tcm_attr(tv_cmd->tvc_task_attr),
++			tv_cmd->tvc_data_direction, 0, sg_ptr,
++			tv_cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
+ 	if (rc < 0) {
+ 		transport_send_check_condition_and_sense(se_cmd,
+ 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+@@ -1150,6 +1167,7 @@ static int vhost_scsi_set_endpoint(
+ 	struct vhost_scsi *vs,
+ 	struct vhost_scsi_target *t)
+ {
++	struct se_portal_group *se_tpg;
+ 	struct tcm_vhost_tport *tv_tport;
+ 	struct tcm_vhost_tpg *tv_tpg;
+ 	struct tcm_vhost_tpg **vs_tpg;
+@@ -1197,6 +1215,21 @@ static int vhost_scsi_set_endpoint(
+ 				ret = -EEXIST;
+ 				goto out;
+ 			}
++			/*
++			 * In order to ensure individual vhost-scsi configfs
++			 * groups cannot be removed while in use by vhost ioctl,
++			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
++			 * dependency now.
++			 */
++			se_tpg = &tv_tpg->se_tpg;
++			ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
++						   &se_tpg->tpg_group.cg_item);
++			if (ret) {
++				pr_warn("configfs_depend_item() failed: %d\n", ret);
++				kfree(vs_tpg);
++				mutex_unlock(&tv_tpg->tv_tpg_mutex);
++				goto out;
++			}
+ 			tv_tpg->tv_tpg_vhost_count++;
+ 			tv_tpg->vhost_scsi = vs;
+ 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+@@ -1240,6 +1273,7 @@ static int vhost_scsi_clear_endpoint(
+ 	struct vhost_scsi *vs,
+ 	struct vhost_scsi_target *t)
+ {
++	struct se_portal_group *se_tpg;
+ 	struct tcm_vhost_tport *tv_tport;
+ 	struct tcm_vhost_tpg *tv_tpg;
+ 	struct vhost_virtqueue *vq;
+@@ -1288,6 +1322,13 @@ static int vhost_scsi_clear_endpoint(
+ 		vs->vs_tpg[target] = NULL;
+ 		match = true;
+ 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
++		/*
++		 * Release se_tpg->tpg_group.cg_item configfs dependency now
++		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
++		 */
++		se_tpg = &tv_tpg->se_tpg;
++		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
++				       &se_tpg->tpg_group.cg_item);
+ 	}
+ 	if (match) {
+ 		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 725e87538c98..615c5079db7c 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
+  */
+ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ {
++	struct inode *inode = iocb->ki_filp->f_mapping->host;
++
++	/* we only support swap files calling nfs_direct_IO */
++	if (!IS_SWAPFILE(inode))
++		return 0;
++
+ #ifndef CONFIG_NFS_SWAP
+ 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+ 			iocb->ki_filp->f_path.dentry->d_name.name,
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 519833d0457e..5f8d5ffdad8f 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -462,7 +462,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
+ 			prev = pos;
+ 
+ 			status = nfs_wait_client_init_complete(pos);
+-			if (status == 0) {
++			if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
+ 				nfs4_schedule_lease_recovery(pos);
+ 				status = nfs4_wait_clnt_recover(pos);
+ 			}
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 3ba30825f387..66c8c2fe86b7 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -178,6 +178,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
+ 	if (p->psi->erase)
+ 		p->psi->erase(p->type, p->id, p->count,
+ 			      dentry->d_inode->i_ctime, p->psi);
++	else
++		return -EPERM;
+ 
+ 	return simple_unlink(dir, dentry);
+ }
+@@ -334,9 +336,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
+ 
+ 	mutex_lock(&root->d_inode->i_mutex);
+ 
+-	rc = -ENOSPC;
+ 	dentry = d_alloc_name(root, name);
+-	if (IS_ERR(dentry))
++	if (!dentry)
+ 		goto fail_lockedalloc;
+ 
+ 	memcpy(private->data, data, size);
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 42d5911c7e29..d3d37142bd93 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -92,6 +92,7 @@ struct ramoops_context {
+ 	struct persistent_ram_ecc_info ecc_info;
+ 	unsigned int max_dump_cnt;
+ 	unsigned int dump_write_cnt;
++	/* the *_read_cnt fields need to be cleared in ramoops_pstore_open */
+ 	unsigned int dump_read_cnt;
+ 	unsigned int console_read_cnt;
+ 	unsigned int ftrace_read_cnt;
+@@ -107,6 +108,7 @@ static int ramoops_pstore_open(struct pstore_info *psi)
+ 
+ 	cxt->dump_read_cnt = 0;
+ 	cxt->console_read_cnt = 0;
++	cxt->ftrace_read_cnt = 0;
+ 	return 0;
+ }
+ 
+@@ -123,13 +125,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
+ 		return NULL;
+ 
+ 	prz = przs[i];
++	if (!prz)
++		return NULL;
+ 
+-	if (update) {
+-		/* Update old/shadowed buffer. */
++	/* Update old/shadowed buffer. */
++	if (update)
+ 		persistent_ram_save_old(prz);
+-		if (!persistent_ram_old_size(prz))
+-			return NULL;
+-	}
++
++	if (!persistent_ram_old_size(prz))
++		return NULL;
+ 
+ 	*typep = type;
+ 	*id = i;
+@@ -415,7 +419,6 @@ static int ramoops_probe(struct platform_device *pdev)
+ 	if (!is_power_of_2(pdata->ftrace_size))
+ 		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+ 
+-	cxt->dump_read_cnt = 0;
+ 	cxt->size = pdata->mem_size;
+ 	cxt->phys_addr = pdata->mem_address;
+ 	cxt->memtype = pdata->mem_type;
+diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
+index 6ff97553331b..bda61a759b68 100644
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
+ }
+ 
+ /* increase and wrap the start pointer, returning the old value */
+-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
++static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
+ {
+ 	int old;
+ 	int new;
+@@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+ }
+ 
+ /* increase the size counter until it hits the max size */
+-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
++static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
+ {
+ 	size_t old;
+ 	size_t new;
+@@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+ 	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+ }
+ 
++static DEFINE_RAW_SPINLOCK(buffer_lock);
++
++/* increase and wrap the start pointer, returning the old value */
++static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
++{
++	int old;
++	int new;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&buffer_lock, flags);
++
++	old = atomic_read(&prz->buffer->start);
++	new = old + a;
++	while (unlikely(new > prz->buffer_size))
++		new -= prz->buffer_size;
++	atomic_set(&prz->buffer->start, new);
++
++	raw_spin_unlock_irqrestore(&buffer_lock, flags);
++
++	return old;
++}
++
++/* increase the size counter until it hits the max size */
++static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
++{
++	size_t old;
++	size_t new;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&buffer_lock, flags);
++
++	old = atomic_read(&prz->buffer->size);
++	if (old == prz->buffer_size)
++		goto exit;
++
++	new = old + a;
++	if (new > prz->buffer_size)
++		new = prz->buffer_size;
++	atomic_set(&prz->buffer->size, new);
++
++exit:
++	raw_spin_unlock_irqrestore(&buffer_lock, flags);
++}
++
++static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
++static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
++
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ 	uint8_t *data, size_t len, uint8_t *ecc)
+ {
+@@ -379,6 +426,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+ 		return NULL;
+ 	}
+ 
++	buffer_start_add = buffer_start_add_locked;
++	buffer_size_add = buffer_size_add_locked;
++
+ 	if (memtype)
+ 		va = ioremap(start, size);
+ 	else
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index c2f9d6ca7e5e..16730a9c8cac 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1934,17 +1934,13 @@ static void pool_mayday_timeout(unsigned long __pool)
+  * spin_lock_irq(pool->lock) which may be released and regrabbed
+  * multiple times.  Does GFP_KERNEL allocations.  Called only from
+  * manager.
+- *
+- * RETURNS:
+- * %false if no action was taken and pool->lock stayed locked, %true
+- * otherwise.
+  */
+-static bool maybe_create_worker(struct worker_pool *pool)
++static void maybe_create_worker(struct worker_pool *pool)
+ __releases(&pool->lock)
+ __acquires(&pool->lock)
+ {
+ 	if (!need_to_create_worker(pool))
+-		return false;
++		return;
+ restart:
+ 	spin_unlock_irq(&pool->lock);
+ 
+@@ -1961,7 +1957,7 @@ restart:
+ 			start_worker(worker);
+ 			if (WARN_ON_ONCE(need_to_create_worker(pool)))
+ 				goto restart;
+-			return true;
++			return;
+ 		}
+ 
+ 		if (!need_to_create_worker(pool))
+@@ -1978,7 +1974,7 @@ restart:
+ 	spin_lock_irq(&pool->lock);
+ 	if (need_to_create_worker(pool))
+ 		goto restart;
+-	return true;
++	return;
+ }
+ 
+ /**
+@@ -1991,15 +1987,9 @@ restart:
+  * LOCKING:
+  * spin_lock_irq(pool->lock) which may be released and regrabbed
+  * multiple times.  Called only from manager.
+- *
+- * RETURNS:
+- * %false if no action was taken and pool->lock stayed locked, %true
+- * otherwise.
+  */
+-static bool maybe_destroy_workers(struct worker_pool *pool)
++static void maybe_destroy_workers(struct worker_pool *pool)
+ {
+-	bool ret = false;
+-
+ 	while (too_many_workers(pool)) {
+ 		struct worker *worker;
+ 		unsigned long expires;
+@@ -2013,10 +2003,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
+ 		}
+ 
+ 		destroy_worker(worker);
+-		ret = true;
+ 	}
+-
+-	return ret;
+ }
+ 
+ /**
+@@ -2036,13 +2023,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
+  * multiple times.  Does GFP_KERNEL allocations.
+  *
+  * RETURNS:
+- * spin_lock_irq(pool->lock) which may be released and regrabbed
+- * multiple times.  Does GFP_KERNEL allocations.
++ * %false if the pool doesn't need management and the caller can safely
++ * start processing works, %true if management function was performed and
++ * the conditions that the caller verified before calling the function may
++ * no longer be true.
+  */
+ static bool manage_workers(struct worker *worker)
+ {
+ 	struct worker_pool *pool = worker->pool;
+-	bool ret = false;
+ 
+ 	/*
+ 	 * Managership is governed by two mutexes - manager_arb and
+@@ -2066,7 +2054,7 @@ static bool manage_workers(struct worker *worker)
+ 	 * manager_mutex.
+ 	 */
+ 	if (!mutex_trylock(&pool->manager_arb))
+-		return ret;
++		return false;
+ 
+ 	/*
+ 	 * With manager arbitration won, manager_mutex would be free in
+@@ -2076,7 +2064,6 @@ static bool manage_workers(struct worker *worker)
+ 		spin_unlock_irq(&pool->lock);
+ 		mutex_lock(&pool->manager_mutex);
+ 		spin_lock_irq(&pool->lock);
+-		ret = true;
+ 	}
+ 
+ 	pool->flags &= ~POOL_MANAGE_WORKERS;
+@@ -2085,12 +2072,12 @@ static bool manage_workers(struct worker *worker)
+ 	 * Destroy and then create so that may_start_working() is true
+ 	 * on return.
+ 	 */
+-	ret |= maybe_destroy_workers(pool);
+-	ret |= maybe_create_worker(pool);
++	maybe_destroy_workers(pool);
++	maybe_create_worker(pool);
+ 
+ 	mutex_unlock(&pool->manager_mutex);
+ 	mutex_unlock(&pool->manager_arb);
+-	return ret;
++	return true;
+ }
+ 
+ /**
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 62aebed7c6e2..dc200bf831ae 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2629,6 +2629,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
+ 	if (!rdev->ops->get_key)
+ 		return -EOPNOTSUPP;
+ 
++	if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
++		return -ENOENT;
++
+ 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ 	if (!msg)
+ 		return -ENOMEM;
+@@ -2648,10 +2651,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
+ 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
+ 		goto nla_put_failure;
+ 
+-	if (pairwise && mac_addr &&
+-	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+-		return -ENOENT;
+-
+ 	err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
+ 			   get_key_callback);
+ 
+@@ -2822,7 +2821,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
+ 	wdev_lock(dev->ieee80211_ptr);
+ 	err = nl80211_key_allowed(dev->ieee80211_ptr);
+ 
+-	if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
++	if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
+ 	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+ 		err = -ENOENT;
+ 
+diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
+index dbc550716790..f60d81497f28 100644
+--- a/sound/core/seq/seq_dummy.c
++++ b/sound/core/seq/seq_dummy.c
+@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
+ static int my_client = -1;
+ 
+ /*
+- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
+- * to subscribers.
+- * Note: this callback is called only after all subscribers are removed.
+- */
+-static int
+-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
+-{
+-	struct snd_seq_dummy_port *p;
+-	int i;
+-	struct snd_seq_event ev;
+-
+-	p = private_data;
+-	memset(&ev, 0, sizeof(ev));
+-	if (p->duplex)
+-		ev.source.port = p->connect;
+-	else
+-		ev.source.port = p->port;
+-	ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
+-	ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
+-	for (i = 0; i < 16; i++) {
+-		ev.data.control.channel = i;
+-		ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
+-		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
+-		ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
+-		snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
+-	}
+-	return 0;
+-}
+-
+-/*
+  * event input callback - just redirect events to subscribers
+  */
+ static int
+@@ -175,7 +145,6 @@ create_port(int idx, int type)
+ 		| SNDRV_SEQ_PORT_TYPE_PORT;
+ 	memset(&pcb, 0, sizeof(pcb));
+ 	pcb.owner = THIS_MODULE;
+-	pcb.unuse = dummy_unuse;
+ 	pcb.event_input = dummy_input;
+ 	pcb.private_free = dummy_free;
+ 	pcb.private_data = rec;
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 5e5af898f7f8..412d90f7b256 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -555,7 +555,7 @@ static struct {
+ 	{ 22050, 2 },
+ 	{ 24000, 2 },
+ 	{ 16000, 3 },
+-	{ 11250, 4 },
++	{ 11025, 4 },
+ 	{ 12000, 4 },
+ 	{  8000, 5 },
+ };