From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Mon, 17 Aug 2015 22:08:13 +0000 (UTC)
Message-ID: <1439848535.88995e4f17dee7d97d16d8029ddf0bc0c0a6de68.mpagano@gentoo>

commit:     88995e4f17dee7d97d16d8029ddf0bc0c0a6de68
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 17 21:55:35 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Aug 17 21:55:35 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=88995e4f

Linux patch 3.10.87

 0000_README              |    4 +
 1086_linux-3.10.87.patch | 1317 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1321 insertions(+)

diff --git a/0000_README b/0000_README
index e519e9a..f2882cc 100644
--- a/0000_README
+++ b/0000_README
@@ -386,6 +386,10 @@ Patch:  1085_linux-3.10.86.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.10.86
 
+Patch:  1086_linux-3.10.87.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.10.87
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1086_linux-3.10.87.patch b/1086_linux-3.10.87.patch
new file mode 100644
index 0000000..c17a77d
--- /dev/null
+++ b/1086_linux-3.10.87.patch
@@ -0,0 +1,1317 @@
+diff --git a/Makefile b/Makefile
+index 25ee724c9089..0d4fd6427349 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 86
++SUBLEVEL = 87
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 32640ae7750f..03a1e26ba3a3 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -358,7 +358,8 @@ ENDPROC(__pabt_svc)
+ 	.endm
+ 
+ 	.macro	kuser_cmpxchg_check
+-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
++#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
++    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+ #ifndef CONFIG_MMU
+ #warning "NPTL on non MMU needs fixing"
+ #else
+diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
+index 25442f451148..918875d96d5d 100644
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
+ 
+ void set_fiq_handler(void *start, unsigned int length)
+ {
+-#if defined(CONFIG_CPU_USE_DOMAINS)
+-	void *base = (void *)0xffff0000;
+-#else
+ 	void *base = vectors_page;
+-#endif
+ 	unsigned offset = FIQ_OFFSET;
+ 
+ 	memcpy(base + offset, start, length);
++	if (!cache_is_vipt_nonaliasing())
++		flush_icache_range((unsigned long)base + offset, offset +
++				   length);
+ 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
+-	if (!vectors_high())
+-		flush_icache_range(offset, offset + length);
+ }
+ 
+ int claim_fiq(struct fiq_handler *f)
+diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
+index 2022e092f0ca..db09170e3832 100644
+--- a/arch/arm/mach-realview/include/mach/memory.h
++++ b/arch/arm/mach-realview/include/mach/memory.h
+@@ -56,6 +56,8 @@
+ #define PAGE_OFFSET1	(PAGE_OFFSET + 0x10000000)
+ #define PAGE_OFFSET2	(PAGE_OFFSET + 0x30000000)
+ 
++#define PHYS_OFFSET PLAT_PHYS_OFFSET
++
+ #define __phys_to_virt(phys)						\
+ 	((phys) >= 0x80000000 ?	(phys) - 0x80000000 + PAGE_OFFSET2 :	\
+ 	 (phys) >= 0x20000000 ?	(phys) - 0x20000000 + PAGE_OFFSET1 :	\
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index 3d478102b1c0..b9564b8d6bab 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -193,7 +193,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitely for the right codes here.
+ 		 */
+-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
++		if (from->si_signo == SIGBUS &&
++		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+ #endif
+ 		break;
+@@ -220,8 +221,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE))
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 8b8f6b393363..e821de7cb14e 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -150,8 +150,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ 		 * Make sure the buddy is global too (if it's !none,
+ 		 * it better already be global)
+ 		 */
++#ifdef CONFIG_SMP
++		/*
++		 * For SMP, multiple CPUs can race, so we need to do
++		 * this atomically.
++		 */
++#ifdef CONFIG_64BIT
++#define LL_INSN "lld"
++#define SC_INSN "scd"
++#else /* CONFIG_32BIT */
++#define LL_INSN "ll"
++#define SC_INSN "sc"
++#endif
++		unsigned long page_global = _PAGE_GLOBAL;
++		unsigned long tmp;
++
++		__asm__ __volatile__ (
++			"	.set	push\n"
++			"	.set	noreorder\n"
++			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
++			"	bnez	%[tmp], 2f\n"
++			"	 or	%[tmp], %[tmp], %[global]\n"
++			"	" SC_INSN "	%[tmp], %[buddy]\n"
++			"	beqz	%[tmp], 1b\n"
++			"	 nop\n"
++			"2:\n"
++			"	.set pop"
++			: [buddy] "+m" (buddy->pte),
++			  [tmp] "=&r" (tmp)
++			: [global] "r" (page_global));
++#else /* !CONFIG_SMP */
+ 		if (pte_none(*buddy))
+ 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
++#endif /* CONFIG_SMP */
+ 	}
+ #endif
+ }
+diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
+index fd814e08c945..0f3e030f232b 100644
+--- a/arch/mips/kernel/mips-mt-fpaff.c
++++ b/arch/mips/kernel/mips-mt-fpaff.c
+@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ 				      unsigned long __user *user_mask_ptr)
+ {
+ 	unsigned int real_len;
+-	cpumask_t mask;
++	cpumask_t allowed, mask;
+ 	int retval;
+ 	struct task_struct *p;
+ 
+@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ 	if (retval)
+ 		goto out_unlock;
+ 
+-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
++	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++	cpumask_and(&mask, &allowed, cpu_active_mask);
+ 
+ out_unlock:
+ 	read_unlock(&tasklist_lock);
+diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
+index 57de8b751627..41f8708d21a8 100644
+--- a/arch/mips/kernel/signal32.c
++++ b/arch/mips/kernel/signal32.c
+@@ -368,8 +368,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, 3*sizeof(int)) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE32))
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 81f929f026f2..d9b673b06757 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -949,8 +949,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, 3*sizeof(int)) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE32))
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
+index 39ca301920db..50d6f16a1513 100644
+--- a/arch/sparc/include/asm/visasm.h
++++ b/arch/sparc/include/asm/visasm.h
+@@ -28,18 +28,20 @@
+  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
+ 
+ #define VISEntryHalf					\
++	VISEntry
++
++#define VISExitHalf					\
++	VISExit
++
++#define VISEntryHalfFast(fail_label)			\
+ 	rd		%fprs, %o5;			\
+ 	andcc		%o5, FPRS_FEF, %g0;		\
+ 	be,pt		%icc, 297f;			\
+-	 sethi		%hi(298f), %g7;			\
+-	sethi		%hi(VISenterhalf), %g1;		\
+-	jmpl		%g1 + %lo(VISenterhalf), %g0;	\
+-	 or		%g7, %lo(298f), %g7;		\
+-	clr		%o5;				\
+-297:	wr		%o5, FPRS_FEF, %fprs;		\
+-298:
++	 nop;						\
++	ba,a,pt		%xcc, fail_label;		\
++297:	wr		%o5, FPRS_FEF, %fprs;
+ 
+-#define VISExitHalf					\
++#define VISExitHalfFast					\
+ 	wr		%o5, 0, %fprs;
+ 
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 9cf2ee01cee3..83aeeb1dffdb 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -41,6 +41,10 @@
+ #endif
+ #endif
+ 
++#if !defined(EX_LD) && !defined(EX_ST)
++#define NON_USER_COPY
++#endif
++
+ #ifndef EX_LD
+ #define EX_LD(x)	x
+ #endif
+@@ -197,9 +201,13 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	 mov		EX_RETVAL(%o3), %o0
+ 
+ .Llarge_src_unaligned:
++#ifdef NON_USER_COPY
++	VISEntryHalfFast(.Lmedium_vis_entry_fail)
++#else
++	VISEntryHalf
++#endif
+ 	andn		%o2, 0x3f, %o4
+ 	sub		%o2, %o4, %o2
+-	VISEntryHalf
+ 	alignaddr	%o1, %g0, %g1
+ 	add		%o1, %o4, %o1
+ 	EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
+@@ -232,14 +240,21 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	add		%o0, 0x40, %o0
+ 	bne,pt		%icc, 1b
+ 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
++#ifdef NON_USER_COPY
++	VISExitHalfFast
++#else
+ 	VISExitHalf
+-
++#endif
+ 	brz,pn		%o2, .Lexit
+ 	 cmp		%o2, 19
+ 	ble,pn		%icc, .Lsmall_unaligned
+ 	 nop
+ 	ba,a,pt		%icc, .Lmedium_unaligned
+ 
++#ifdef NON_USER_COPY
++.Lmedium_vis_entry_fail:
++	 or		%o0, %o1, %g2
++#endif
+ .Lmedium:
+ 	LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
+ 	andcc		%g2, 0x7, %g0
+diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
+index b320ae9e2e2e..a063d84336d6 100644
+--- a/arch/sparc/lib/VISsave.S
++++ b/arch/sparc/lib/VISsave.S
+@@ -44,9 +44,8 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+ 
+ 	 stx		%g3, [%g6 + TI_GSR]
+ 2:	add		%g6, %g1, %g3
+-	cmp		%o5, FPRS_DU
+-	be,pn		%icc, 6f
+-	 sll		%g1, 3, %g1
++	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
++	sll		%g1, 3, %g1
+ 	stb		%o5, [%g3 + TI_FPSAVED]
+ 	rd		%gsr, %g2
+ 	add		%g6, %g1, %g3
+@@ -80,65 +79,3 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+ 	.align		32
+ 80:	jmpl		%g7 + %g0, %g0
+ 	 nop
+-
+-6:	ldub		[%g3 + TI_FPSAVED], %o5
+-	or		%o5, FPRS_DU, %o5
+-	add		%g6, TI_FPREGS+0x80, %g2
+-	stb		%o5, [%g3 + TI_FPSAVED]
+-
+-	sll		%g1, 5, %g1
+-	add		%g6, TI_FPREGS+0xc0, %g3
+-	wr		%g0, FPRS_FEF, %fprs
+-	membar		#Sync
+-	stda		%f32, [%g2 + %g1] ASI_BLK_P
+-	stda		%f48, [%g3 + %g1] ASI_BLK_P
+-	membar		#Sync
+-	ba,pt		%xcc, 80f
+-	 nop
+-
+-	.align		32
+-80:	jmpl		%g7 + %g0, %g0
+-	 nop
+-
+-	.align		32
+-VISenterhalf:
+-	ldub		[%g6 + TI_FPDEPTH], %g1
+-	brnz,a,pn	%g1, 1f
+-	 cmp		%g1, 1
+-	stb		%g0, [%g6 + TI_FPSAVED]
+-	stx		%fsr, [%g6 + TI_XFSR]
+-	clr		%o5
+-	jmpl		%g7 + %g0, %g0
+-	 wr		%g0, FPRS_FEF, %fprs
+-
+-1:	bne,pn		%icc, 2f
+-	 srl		%g1, 1, %g1
+-	ba,pt		%xcc, vis1
+-	 sub		%g7, 8, %g7
+-2:	addcc		%g6, %g1, %g3
+-	sll		%g1, 3, %g1
+-	andn		%o5, FPRS_DU, %g2
+-	stb		%g2, [%g3 + TI_FPSAVED]
+-
+-	rd		%gsr, %g2
+-	add		%g6, %g1, %g3
+-	stx		%g2, [%g3 + TI_GSR]
+-	add		%g6, %g1, %g2
+-	stx		%fsr, [%g2 + TI_XFSR]
+-	sll		%g1, 5, %g1
+-3:	andcc		%o5, FPRS_DL, %g0
+-	be,pn		%icc, 4f
+-	 add		%g6, TI_FPREGS, %g2
+-
+-	add		%g6, TI_FPREGS+0x40, %g3
+-	membar		#Sync
+-	stda		%f0, [%g2 + %g1] ASI_BLK_P
+-	stda		%f16, [%g3 + %g1] ASI_BLK_P
+-	membar		#Sync
+-	ba,pt		%xcc, 4f
+-	 nop
+-
+-	.align		32
+-4:	and		%o5, FPRS_DU, %o5
+-	jmpl		%g7 + %g0, %g0
+-	 wr		%o5, FPRS_FEF, %fprs
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 323335b9cd2b..ac094de28ccf 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -126,10 +126,6 @@ EXPORT_SYMBOL(copy_user_page);
+ void VISenter(void);
+ EXPORT_SYMBOL(VISenter);
+ 
+-/* CRYPTO code needs this */
+-void VISenterhalf(void);
+-EXPORT_SYMBOL(VISenterhalf);
+-
+ extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+ extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+ 		unsigned long *);
+diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
+index 7e28d9467bb4..4cbe03287b08 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd.c
++++ b/arch/x86/kernel/cpu/perf_event_amd.c
+@@ -648,48 +648,48 @@ static __initconst const struct x86_pmu amd_pmu = {
+ 	.cpu_dead		= amd_pmu_cpu_dead,
+ };
+ 
+-static int setup_event_constraints(void)
++static int __init amd_core_pmu_init(void)
+ {
+-	if (boot_cpu_data.x86 == 0x15)
++	if (!cpu_has_perfctr_core)
++		return 0;
++
++	switch (boot_cpu_data.x86) {
++	case 0x15:
++		pr_cont("Fam15h ");
+ 		x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+-	return 0;
+-}
++		break;
+ 
+-static int setup_perfctr_core(void)
+-{
+-	if (!cpu_has_perfctr_core) {
+-		WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
+-		     KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
++	default:
++		pr_err("core perfctr but no constraints; unknown hardware!\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
+-	     KERN_ERR "hw perf events core counters need constraints handler!");
+-
+ 	/*
+ 	 * If core performance counter extensions exists, we must use
+ 	 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
+-	 * x86_pmu_addr_offset().
++	 * amd_pmu_addr_offset().
+ 	 */
+ 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
+ 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
+ 	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
+ 
+-	printk(KERN_INFO "perf: AMD core performance counters detected\n");
+-
++	pr_cont("core perfctr, ");
+ 	return 0;
+ }
+ 
+ __init int amd_pmu_init(void)
+ {
++	int ret;
++
+ 	/* Performance-monitoring supported from K7 and later: */
+ 	if (boot_cpu_data.x86 < 6)
+ 		return -ENODEV;
+ 
+ 	x86_pmu = amd_pmu;
+ 
+-	setup_event_constraints();
+-	setup_perfctr_core();
++	ret = amd_core_pmu_init();
++	if (ret)
++		return ret;
+ 
+ 	/* Events are common for all AMDs */
+ 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index c8b0d0d2da5c..fc87568fc409 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
+ 
+ static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.apic->pending_events;
++	return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
+ }
+ 
+ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a492be2635ac..13d926282c89 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -481,6 +481,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 	pte_t pte;
+ 	unsigned long pfn;
+ 	struct page *page;
++	unsigned char dummy;
+ 
+ 	ptep = lookup_address((unsigned long)v, &level);
+ 	BUG_ON(ptep == NULL);
+@@ -490,6 +491,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 
+ 	pte = pfn_pte(pfn, prot);
+ 
++	/*
++	 * Careful: update_va_mapping() will fail if the virtual address
++	 * we're poking isn't populated in the page tables.  We don't
++	 * need to worry about the direct map (that's always in the page
++	 * tables), but we need to be careful about vmap space.  In
++	 * particular, the top level page table can lazily propagate
++	 * entries between processes, so if we've switched mms since we
++	 * vmapped the target in the first place, we might not have the
++	 * top-level page table entry populated.
++	 *
++	 * We disable preemption because we want the same mm active when
++	 * we probe the target and when we issue the hypercall.  We'll
++	 * have the same nominal mm, but if we're a kernel thread, lazy
++	 * mm dropping could change our pgd.
++	 *
++	 * Out of an abundance of caution, this uses __get_user() to fault
++	 * in the target address just in case there's some obscure case
++	 * in which the target address isn't readable.
++	 */
++
++	preempt_disable();
++
++	pagefault_disable();	/* Avoid warnings due to being atomic. */
++	__get_user(dummy, (unsigned char __user __force *)v);
++	pagefault_enable();
++
+ 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ 		BUG();
+ 
+@@ -501,6 +528,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 				BUG();
+ 	} else
+ 		kmap_flush_unused();
++
++	preempt_enable();
+ }
+ 
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -508,6 +537,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+ 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ 	int i;
+ 
++	/*
++	 * We need to mark the all aliases of the LDT pages RO.  We
++	 * don't need to call vm_flush_aliases(), though, since that's
++	 * only responsible for flushing aliases out the TLBs, not the
++	 * page tables, and Xen will flush the TLB for us if needed.
++	 *
++	 * To avoid confusing future readers: none of this is necessary
++	 * to load the LDT.  The hypervisor only checks this when the
++	 * LDT is faulted in due to subsequent descriptor access.
++	 */
++
+ 	for(i = 0; i < entries; i += entries_per_page)
+ 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index f78cbbb88bd4..01677543248d 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -457,6 +457,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+ #  define rbd_assert(expr)	((void) 0)
+ #endif /* !RBD_DEBUG */
+ 
++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
+ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+@@ -1670,6 +1671,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
+ 	obj_request_done_set(obj_request);
+ }
+ 
++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
++{
++	dout("%s: obj %p\n", __func__, obj_request);
++
++	if (obj_request_img_data_test(obj_request))
++		rbd_osd_copyup_callback(obj_request);
++	else
++		obj_request_done_set(obj_request);
++}
++
+ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ 				struct ceph_msg *msg)
+ {
+@@ -1708,6 +1719,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ 		rbd_osd_stat_callback(obj_request);
+ 		break;
+ 	case CEPH_OSD_OP_CALL:
++		rbd_osd_call_callback(obj_request);
++		break;
+ 	case CEPH_OSD_OP_NOTIFY_ACK:
+ 	case CEPH_OSD_OP_WATCH:
+ 		rbd_osd_trivial_callback(obj_request);
+@@ -2305,13 +2318,15 @@ out_unwind:
+ }
+ 
+ static void
+-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
+ {
+ 	struct rbd_img_request *img_request;
+ 	struct rbd_device *rbd_dev;
+ 	struct page **pages;
+ 	u32 page_count;
+ 
++	dout("%s: obj %p\n", __func__, obj_request);
++
+ 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+ 	rbd_assert(obj_request_img_data_test(obj_request));
+ 	img_request = obj_request->img_request;
+@@ -2337,9 +2352,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+ 	if (!obj_request->result)
+ 		obj_request->xferred = obj_request->length;
+ 
+-	/* Finish up with the normal image object callback */
+-
+-	rbd_img_obj_callback(obj_request);
++	obj_request_done_set(obj_request);
+ }
+ 
+ static void
+@@ -2436,7 +2449,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
+ 
+ 	/* All set, send it off. */
+ 
+-	orig_request->callback = rbd_img_obj_copyup_callback;
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	img_result = rbd_obj_request_submit(osdc, orig_request);
+ 	if (!img_result)
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 40b3f756f904..02cc352d8bcc 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2717,7 +2717,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
+ 		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ 			schedule_timeout_uninterruptible(1);
+ 			smi_result = smi_info->handlers->event(
+-				smi_info->si_sm, 100);
++				smi_info->si_sm, jiffies_to_usecs(1));
+ 		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ 			smi_result = smi_info->handlers->event(
+ 				smi_info->si_sm, 0);
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 21180d6cad6e..7cb51b3bb79e 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -915,7 +915,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+ 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ 		/* This was never tested by Intel
+ 		 * for more than one dst buffer, I think. */
+-		BUG_ON(req->dst->length < nbytes);
+ 		req_ctx->dst = NULL;
+ 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ 					flags, DMA_FROM_DEVICE))
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 68ce36056019..8cac69819054 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1271,10 +1271,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ 
+ 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+ 			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
++				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++
++				if (hss > lvds->native_mode.hdisplay)
++					hss = (10 - 1) * 8;
++
+ 				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ 					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ 				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+-					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++					hss;
+ 				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ 					(RBIOS8(tmp + 23) * 8);
+ 
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index a79cbd6038f6..37470ee7c850 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -564,6 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
+ 	if (err)
+ 		return err;
+ 
++	err = -EINVAL;
++
+ 	sb = kmap_atomic(sb_page);
+ 
+ 	chunksize = le32_to_cpu(sb->chunksize);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 631fe3e9c6e5..37ff00d014b4 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5628,9 +5628,9 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
+ 	int err = -ENOMEM;
+ 
+ 	if (md_allow_write(mddev))
+-		file = kmalloc(sizeof(*file), GFP_NOIO);
++		file = kzalloc(sizeof(*file), GFP_NOIO);
+ 	else
+-		file = kmalloc(sizeof(*file), GFP_KERNEL);
++		file = kzalloc(sizeof(*file), GFP_KERNEL);
+ 
+ 	if (!file)
+ 		goto out;
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index fa58438b298a..72141ee60705 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1382,6 +1382,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ 	char b[BDEVNAME_SIZE];
+ 	struct r1conf *conf = mddev->private;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * If it is not operational, then we have already marked it as dead
+@@ -1401,14 +1402,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		return;
+ 	}
+ 	set_bit(Blocked, &rdev->flags);
++	spin_lock_irqsave(&conf->device_lock, flags);
+ 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
+-		unsigned long flags;
+-		spin_lock_irqsave(&conf->device_lock, flags);
+ 		mddev->degraded++;
+ 		set_bit(Faulty, &rdev->flags);
+-		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	} else
+ 		set_bit(Faulty, &rdev->flags);
++	spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	/*
+ 	 * if recovery is running, make sure it aborts.
+ 	 */
+@@ -1466,7 +1466,10 @@ static int raid1_spare_active(struct mddev *mddev)
+ 	 * Find all failed disks within the RAID1 configuration 
+ 	 * and mark them readable.
+ 	 * Called under mddev lock, so rcu protection not needed.
++	 * device_lock used to avoid races with raid1_end_read_request
++	 * which expects 'In_sync' flags and ->degraded to be consistent.
+ 	 */
++	spin_lock_irqsave(&conf->device_lock, flags);
+ 	for (i = 0; i < conf->raid_disks; i++) {
+ 		struct md_rdev *rdev = conf->mirrors[i].rdev;
+ 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+@@ -1496,7 +1499,6 @@ static int raid1_spare_active(struct mddev *mddev)
+ 			sysfs_notify_dirent_safe(rdev->sysfs_state);
+ 		}
+ 	}
+-	spin_lock_irqsave(&conf->device_lock, flags);
+ 	mddev->degraded -= count;
+ 	spin_unlock_irqrestore(&conf->device_lock, flags);
+ 
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index 9816c232e583..c04e08d1d0fa 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -1232,7 +1232,7 @@ static ssize_t sm501_dbg_regs(struct device *dev,
+ }
+ 
+ 
+-static DEVICE_ATTR(dbg_regs, 0666, sm501_dbg_regs, NULL);
++static DEVICE_ATTR(dbg_regs, 0444, sm501_dbg_regs, NULL);
+ 
+ /* sm501_init_reg
+  *
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index f7732f3b9804..4a79a5f0d95e 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -554,9 +554,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
+ {
+ 	struct ipr_trace_entry *trace_entry;
+ 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
++	unsigned int trace_index;
+ 
+-	trace_entry = &ioa_cfg->trace[atomic_add_return
+-			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
++	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
++	trace_entry = &ioa_cfg->trace[trace_index];
+ 	trace_entry->time = jiffies;
+ 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
+ 	trace_entry->type = type;
+@@ -1006,10 +1007,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
+ 
+ static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+ {
++	unsigned int hrrq;
++
+ 	if (ioa_cfg->hrrq_num == 1)
+-		return 0;
+-	else
+-		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
++		hrrq = 0;
++	else {
++		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
++		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
++	}
++	return hrrq;
+ }
+ 
+ /**
+@@ -6141,21 +6147,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
+ 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+-	unsigned long hrrq_flags;
++	unsigned long lock_flags;
+ 
+ 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
+ 
+ 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
+ 		scsi_dma_unmap(scsi_cmd);
+ 
+-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
+ 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 		scsi_cmd->scsi_done(scsi_cmd);
+-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
+ 	} else {
+-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
++		spin_lock(&ipr_cmd->hrrq->_lock);
+ 		ipr_erp_start(ioa_cfg, ipr_cmd);
+-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_unlock(&ipr_cmd->hrrq->_lock);
++		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index c19911554036..e045676d8325 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1452,6 +1452,7 @@ struct ipr_ioa_cfg {
+ 
+ #define IPR_NUM_TRACE_INDEX_BITS	8
+ #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
++#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
+ #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
+ 	char trace_start[8];
+ #define IPR_TRACE_START_LABEL			"trace"
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index eb81c98386b9..721d839d6c54 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1694,6 +1694,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 			md->from_user = 0;
+ 	}
+ 
++	if (unlikely(iov_count > UIO_MAXIOV))
++		return -EINVAL;
++
+ 	if (iov_count) {
+ 		int len, size = sizeof(struct sg_iovec) * iov_count;
+ 		struct iovec *iov;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index efca110342cb..06cd916f91fe 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3874,7 +3874,13 @@ get_immediate:
+ 	}
+ 
+ transport_err:
+-	iscsit_take_action_for_connection_exit(conn);
++	/*
++	 * Avoid the normal connection failure code-path if this connection
++	 * is still within LOGIN mode, and iscsi_np process context is
++	 * responsible for cleaning up the early connection failure.
++	 */
++	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
++		iscsit_take_action_for_connection_exit(conn);
+ out:
+ 	return 0;
+ }
+@@ -3956,7 +3962,7 @@ reject:
+ 
+ int iscsi_target_rx_thread(void *arg)
+ {
+-	int ret;
++	int ret, rc;
+ 	u8 buffer[ISCSI_HDR_LEN], opcode;
+ 	u32 checksum = 0, digest = 0;
+ 	struct iscsi_conn *conn = arg;
+@@ -3966,10 +3972,16 @@ int iscsi_target_rx_thread(void *arg)
+ 	 * connection recovery / failure event can be triggered externally.
+ 	 */
+ 	allow_signal(SIGINT);
++	/*
++	 * Wait for iscsi_post_login_handler() to complete before allowing
++	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
++	 */
++	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
++	if (rc < 0)
++		return 0;
+ 
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ 		struct completion comp;
+-		int rc;
+ 
+ 		init_completion(&comp);
+ 		rc = wait_for_completion_interruptible(&comp);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 815bf5b1a4ae..bf93e1c1ff97 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -589,6 +589,7 @@ struct iscsi_conn {
+ 	int			bitmap_id;
+ 	int			rx_thread_active;
+ 	struct task_struct	*rx_thread;
++	struct completion	rx_login_comp;
+ 	int			tx_thread_active;
+ 	struct task_struct	*tx_thread;
+ 	/* list_head for session connection list */
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 797b2e2acc35..2c4db62e327e 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -84,6 +84,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
+ 	init_completion(&conn->conn_logout_comp);
+ 	init_completion(&conn->rx_half_close_comp);
+ 	init_completion(&conn->tx_half_close_comp);
++	init_completion(&conn->rx_login_comp);
+ 	spin_lock_init(&conn->cmd_lock);
+ 	spin_lock_init(&conn->conn_usage_lock);
+ 	spin_lock_init(&conn->immed_queue_lock);
+@@ -718,6 +719,7 @@ int iscsit_start_kthreads(struct iscsi_conn *conn)
+ 
+ 	return 0;
+ out_tx:
++	send_sig(SIGINT, conn->tx_thread, 1);
+ 	kthread_stop(conn->tx_thread);
+ 	conn->tx_thread_active = false;
+ out_bitmap:
+@@ -728,7 +730,7 @@ out_bitmap:
+ 	return ret;
+ }
+ 
+-int iscsi_post_login_handler(
++void iscsi_post_login_handler(
+ 	struct iscsi_np *np,
+ 	struct iscsi_conn *conn,
+ 	u8 zero_tsih)
+@@ -738,7 +740,6 @@ int iscsi_post_login_handler(
+ 	struct se_session *se_sess = sess->se_sess;
+ 	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+-	int rc;
+ 
+ 	iscsit_inc_conn_usage_count(conn);
+ 
+@@ -779,10 +780,6 @@ int iscsi_post_login_handler(
+ 			sess->sess_ops->InitiatorName);
+ 		spin_unlock_bh(&sess->conn_lock);
+ 
+-		rc = iscsit_start_kthreads(conn);
+-		if (rc)
+-			return rc;
+-
+ 		iscsi_post_login_start_timers(conn);
+ 		/*
+ 		 * Determine CPU mask to ensure connection's RX and TX kthreads
+@@ -791,15 +788,20 @@ int iscsi_post_login_handler(
+ 		iscsit_thread_get_cpumask(conn);
+ 		conn->conn_rx_reset_cpumask = 1;
+ 		conn->conn_tx_reset_cpumask = 1;
+-
++		/*
++		 * Wakeup the sleeping iscsi_target_rx_thread() now that
++		 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
++		 */
++		complete(&conn->rx_login_comp);
+ 		iscsit_dec_conn_usage_count(conn);
++
+ 		if (stop_timer) {
+ 			spin_lock_bh(&se_tpg->session_lock);
+ 			iscsit_stop_time2retain_timer(sess);
+ 			spin_unlock_bh(&se_tpg->session_lock);
+ 		}
+ 		iscsit_dec_session_usage_count(sess);
+-		return 0;
++		return;
+ 	}
+ 
+ 	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+@@ -840,10 +842,6 @@ int iscsi_post_login_handler(
+ 		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ 	spin_unlock_bh(&se_tpg->session_lock);
+ 
+-	rc = iscsit_start_kthreads(conn);
+-	if (rc)
+-		return rc;
+-
+ 	iscsi_post_login_start_timers(conn);
+ 	/*
+ 	 * Determine CPU mask to ensure connection's RX and TX kthreads
+@@ -852,10 +850,12 @@ int iscsi_post_login_handler(
+ 	iscsit_thread_get_cpumask(conn);
+ 	conn->conn_rx_reset_cpumask = 1;
+ 	conn->conn_tx_reset_cpumask = 1;
+-
++	/*
++	 * Wakeup the sleeping iscsi_target_rx_thread() now that
++	 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
++	 */
++	complete(&conn->rx_login_comp);
+ 	iscsit_dec_conn_usage_count(conn);
+-
+-	return 0;
+ }
+ 
+ static void iscsi_handle_login_thread_timeout(unsigned long data)
+@@ -1331,20 +1331,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	if (iscsi_target_start_negotiation(login, conn) < 0)
+ 		goto new_sess_out;
+ 
+-	if (!conn->sess) {
+-		pr_err("struct iscsi_conn session pointer is NULL!\n");
+-		goto new_sess_out;
+-	}
+-
+ 	iscsi_stop_login_thread_timer(np);
+ 
+-	if (signal_pending(current))
+-		goto new_sess_out;
+-
+-	ret = iscsi_post_login_handler(np, conn, zero_tsih);
+-
+-	if (ret < 0)
+-		goto new_sess_out;
++	iscsi_post_login_handler(np, conn, zero_tsih);
+ 
+ 	iscsit_deaccess_np(np, tpg);
+ 	tpg = NULL;
+diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
+index 63efd2878451..6d7eb66de94b 100644
+--- a/drivers/target/iscsi/iscsi_target_login.h
++++ b/drivers/target/iscsi/iscsi_target_login.h
+@@ -12,6 +12,7 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+ extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+ extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
++extern int iscsit_start_kthreads(struct iscsi_conn *);
+ extern int iscsi_target_login_thread(void *);
+ extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 72d9dec991c0..77c276acccb6 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -19,6 +19,7 @@
+  ******************************************************************************/
+ 
+ #include <linux/ctype.h>
++#include <linux/kthread.h>
+ #include <scsi/iscsi_proto.h>
+ #include <target/target_core_base.h>
+ #include <target/target_core_fabric.h>
+@@ -352,10 +353,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
+ 		ntohl(login_rsp->statsn), login->rsp_length);
+ 
+ 	padding = ((-login->rsp_length) & 3);
++	/*
++	 * Before sending the last login response containing the transition
++	 * bit for full-feature-phase, go ahead and start up TX/RX threads
++	 * now to avoid potential resource allocation failures after the
++	 * final login response has been sent.
++	 */
++	if (login->login_complete) {
++		int rc = iscsit_start_kthreads(conn);
++		if (rc) {
++			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
++					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
++			return -1;
++		}
++	}
+ 
+ 	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
+ 					login->rsp_length + padding) < 0)
+-		return -1;
++		goto err;
+ 
+ 	login->rsp_length		= 0;
+ 	mutex_lock(&sess->cmdsn_mutex);
+@@ -364,6 +379,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
+ 	mutex_unlock(&sess->cmdsn_mutex);
+ 
+ 	return 0;
++
++err:
++	if (login->login_complete) {
++		if (conn->rx_thread && conn->rx_thread_active) {
++			send_sig(SIGINT, conn->rx_thread, 1);
++			kthread_stop(conn->rx_thread);
++		}
++		if (conn->tx_thread && conn->tx_thread_active) {
++			send_sig(SIGINT, conn->tx_thread, 1);
++			kthread_stop(conn->tx_thread);
++		}
++		spin_lock(&iscsit_global->ts_bitmap_lock);
++		bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++				      get_order(1));
++		spin_unlock(&iscsit_global->ts_bitmap_lock);
++	}
++	return -1;
+ }
+ 
+ static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 95fe1a432d29..fde0277adc2c 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -85,7 +85,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+ 		return 0;
+ 	/* offset in TRBs */
+ 	segment_offset = trb - seg->trbs;
+-	if (segment_offset > TRBS_PER_SEGMENT)
++	if (segment_offset >= TRBS_PER_SEGMENT)
+ 		return 0;
+ 	return seg->dma + (segment_offset * sizeof(*trb));
+ }
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index 5aaa2b675116..af9f82a1fcde 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ 	},
++	{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
+ 	/* AT&T Direct IP LTE modems */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 3c8803feba26..474d11499d0e 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -534,12 +534,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
+ 
+ 	pr_debug("priv %p\n", priv);
+ 
++	mutex_lock(&priv->lock);
+ 	while (!list_empty(&priv->maps)) {
+ 		map = list_entry(priv->maps.next, struct grant_map, next);
+ 		list_del(&map->next);
+ 		gntdev_put_map(NULL /* already removed */, map);
+ 	}
+ 	WARN_ON(!list_empty(&priv->freeable_maps));
++	mutex_unlock(&priv->lock);
+ 
+ 	if (use_ptemod)
+ 		mmu_notifier_unregister(&priv->mn, priv->mm);
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index fc6b49bf7360..f08b3b729d3d 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -299,16 +299,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
+ 					 unsigned int flags)
+ {
+ 	struct fsnotify_mark *lmark, *mark;
++	LIST_HEAD(to_free);
+ 
++	/*
++	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
++	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
++	 * to_free list so we have to use mark_mutex even when accessing that
++	 * list. And freeing mark requires us to drop mark_mutex. So we can
++	 * reliably free only the first mark in the list. That's why we first
++	 * move marks to free to to_free list in one go and then free marks in
++	 * to_free list one by one.
++	 */
+ 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+-		if (mark->flags & flags) {
+-			fsnotify_get_mark(mark);
+-			fsnotify_destroy_mark_locked(mark, group);
+-			fsnotify_put_mark(mark);
+-		}
++		if (mark->flags & flags)
++			list_move(&mark->g_list, &to_free);
+ 	}
+ 	mutex_unlock(&group->mark_mutex);
++
++	while (1) {
++		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
++		if (list_empty(&to_free)) {
++			mutex_unlock(&group->mark_mutex);
++			break;
++		}
++		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
++		fsnotify_get_mark(mark);
++		fsnotify_destroy_mark_locked(mark, group);
++		mutex_unlock(&group->mark_mutex);
++		fsnotify_put_mark(mark);
++	}
+ }
+ 
+ /*
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 3a44a648dae7..b294deb27d17 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3971,9 +3971,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
+ 	osb->dc_work_sequence = osb->dc_wake_sequence;
+ 
+ 	processed = osb->blocked_lock_count;
+-	while (processed) {
+-		BUG_ON(list_empty(&osb->blocked_lock_list));
+-
++	/*
++	 * blocked lock processing in this loop might call iput which can
++	 * remove items off osb->blocked_lock_list. Downconvert up to
++	 * 'processed' number of locks, but stop short if we had some
++	 * removed in ocfs2_mark_lockres_freeing when downconverting.
++	 */
++	while (processed && !list_empty(&osb->blocked_lock_list)) {
+ 		lockres = list_entry(osb->blocked_lock_list.next,
+ 				     struct ocfs2_lock_res, l_blocked_list);
+ 		list_del_init(&lockres->l_blocked_list);
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 424b7b65321f..148f8e7af882 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (kinfo->si_code == BUS_MCEERR_AR ||
+-		    kinfo->si_code == BUS_MCEERR_AO)
++		if (kinfo->si_signo == SIGBUS &&
++		    (kinfo->si_code == BUS_MCEERR_AR ||
++		     kinfo->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user((short) kinfo->si_addr_lsb,
+ 					  &uinfo->ssi_addr_lsb);
+ #endif
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index bb0248fc5187..82bb5e81ef57 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+ 		if (!leaf)
+ 			return -ENOMEM;
+ 		INIT_LIST_HEAD(&leaf->msg_list);
+-		info->qsize += sizeof(*leaf);
+ 	}
+ 	leaf->priority = msg->m_type;
+ 	rb_link_node(&leaf->rb_node, parent, p);
+@@ -188,7 +187,6 @@ try_again:
+ 			     "lazy leaf delete!\n");
+ 		rb_erase(&leaf->rb_node, &info->msg_tree);
+ 		if (info->node_cache) {
+-			info->qsize -= sizeof(*leaf);
+ 			kfree(leaf);
+ 		} else {
+ 			info->node_cache = leaf;
+@@ -201,7 +199,6 @@ try_again:
+ 		if (list_empty(&leaf->msg_list)) {
+ 			rb_erase(&leaf->rb_node, &info->msg_tree);
+ 			if (info->node_cache) {
+-				info->qsize -= sizeof(*leaf);
+ 				kfree(leaf);
+ 			} else {
+ 				info->node_cache = leaf;
+@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+ 		/* Save our speculative allocation into the cache */
+ 		INIT_LIST_HEAD(&new_leaf->msg_list);
+ 		info->node_cache = new_leaf;
+-		info->qsize += sizeof(*new_leaf);
+ 		new_leaf = NULL;
+ 	} else {
+ 		kfree(new_leaf);
+@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
+ 		/* Save our speculative allocation into the cache */
+ 		INIT_LIST_HEAD(&new_leaf->msg_list);
+ 		info->node_cache = new_leaf;
+-		info->qsize += sizeof(*new_leaf);
+ 	} else {
+ 		kfree(new_leaf);
+ 	}
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 113411bfe8b1..2e51bcbea1e3 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2768,7 +2768,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
++		if (from->si_signo == SIGBUS &&
++		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+ #endif
+ 		break;
+@@ -3035,7 +3036,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
+ 			int, sig,
+ 			struct compat_siginfo __user *, uinfo)
+ {
+-	siginfo_t info;
++	siginfo_t info = {};
+ 	int ret = copy_siginfo_from_user32(&info, uinfo);
+ 	if (unlikely(ret))
+ 		return ret;
+@@ -3081,7 +3082,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
+ 			int, sig,
+ 			struct compat_siginfo __user *, uinfo)
+ {
+-	siginfo_t info;
++	siginfo_t info = {};
+ 
+ 	if (copy_siginfo_from_user32(&info, uinfo))
+ 		return -EFAULT;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index a2fd7e759cb7..233f0011f768 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -730,20 +730,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
+ 			 * could easily OOM just because too many pages are in
+ 			 * writeback and there is nothing else to reclaim.
+ 			 *
+-			 * Check __GFP_IO, certainly because a loop driver
++			 * Require may_enter_fs to wait on writeback, because
++			 * fs may not have submitted IO yet. And a loop driver
+ 			 * thread might enter reclaim, and deadlock if it waits
+ 			 * on a page for which it is needed to do the write
+ 			 * (loop masks off __GFP_IO|__GFP_FS for this reason);
+ 			 * but more thought would probably show more reasons.
+-			 *
+-			 * Don't require __GFP_FS, since we're not going into
+-			 * the FS, just waiting on its writeback completion.
+-			 * Worryingly, ext4 gfs2 and xfs allocate pages with
+-			 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
+-			 * testing may_enter_fs here is liable to OOM on them.
+ 			 */
+ 			if (global_reclaim(sc) ||
+-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
++			    !PageReclaim(page) || !may_enter_fs) {
+ 				/*
+ 				 * This is slightly racy - end_page_writeback()
+ 				 * might have just cleared PageReclaim, then
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index e2642ba88b2d..2ba07ae70879 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -787,9 +787,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
+ 
+ 	spec->spdif_present = spdif_present;
+ 	/* SPDIF TX on/off */
+-	if (spdif_present)
+-		snd_hda_set_pin_ctl(codec, spdif_pin,
+-				    spdif_present ? PIN_OUT : 0);
++	snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
+ 
+ 	cs_automute(codec);
+ }

